]> git.proxmox.com Git - mirror_qemu.git/blame - cpu-exec.c
Suppress i386 warnings
[mirror_qemu.git] / cpu-exec.c
CommitLineData
7d13299d
FB
1/*
2 * i386 emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0
FB
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
fad6cb1a 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
7d13299d 19 */
e4533c7a 20#include "config.h"
7cb69cae 21#define CPU_NO_GLOBAL_REGS
93ac68bc 22#include "exec.h"
956034d7 23#include "disas.h"
7cb69cae 24#include "tcg.h"
7ba1e619 25#include "kvm.h"
7d13299d 26
fbf9eeb3
FB
27#if !defined(CONFIG_SOFTMMU)
28#undef EAX
29#undef ECX
30#undef EDX
31#undef EBX
32#undef ESP
33#undef EBP
34#undef ESI
35#undef EDI
36#undef EIP
37#include <signal.h>
84778508 38#ifdef __linux__
fbf9eeb3
FB
39#include <sys/ucontext.h>
40#endif
84778508 41#endif
fbf9eeb3 42
572a9d4a
BS
43#if defined(__sparc__) && !defined(HOST_SOLARIS)
44// Work around ugly bugs in glibc that mangle global register contents
45#undef env
46#define env cpu_single_env
47#endif
48
36bdbe54
FB
49int tb_invalidated_flag;
50
dc99065b 51//#define DEBUG_EXEC
9de5e440 52//#define DEBUG_SIGNAL
7d13299d 53
e4533c7a
FB
54void cpu_loop_exit(void)
55{
bfed01fc
TS
56 /* NOTE: the register at this point must be saved by hand because
57 longjmp restore them */
58 regs_to_env();
e4533c7a
FB
59 longjmp(env->jmp_env, 1);
60}
bfed01fc 61
fbf9eeb3
FB
62/* exit the current TB from a signal handler. The host registers are
63 restored in a state compatible with the CPU emulator
64 */
5fafdf24 65void cpu_resume_from_signal(CPUState *env1, void *puc)
fbf9eeb3
FB
66{
67#if !defined(CONFIG_SOFTMMU)
84778508 68#ifdef __linux__
fbf9eeb3 69 struct ucontext *uc = puc;
84778508
BS
70#elif defined(__OpenBSD__)
71 struct sigcontext *uc = puc;
72#endif
fbf9eeb3
FB
73#endif
74
75 env = env1;
76
77 /* XXX: restore cpu registers saved in host registers */
78
79#if !defined(CONFIG_SOFTMMU)
80 if (puc) {
81 /* XXX: use siglongjmp ? */
84778508 82#ifdef __linux__
fbf9eeb3 83 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
84778508
BS
84#elif defined(__OpenBSD__)
85 sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
86#endif
fbf9eeb3
FB
87 }
88#endif
9a3ea654 89 env->exception_index = -1;
fbf9eeb3
FB
90 longjmp(env->jmp_env, 1);
91}
92
2e70f6ef
PB
93/* Execute the code without caching the generated code. An interpreter
94 could be used if available. */
95static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
96{
97 unsigned long next_tb;
98 TranslationBlock *tb;
99
100 /* Should never happen.
101 We only end up here when an existing TB is too long. */
102 if (max_cycles > CF_COUNT_MASK)
103 max_cycles = CF_COUNT_MASK;
104
105 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
106 max_cycles);
107 env->current_tb = tb;
108 /* execute the generated code */
109 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
110
111 if ((next_tb & 3) == 2) {
112 /* Restore PC. This may happen if async event occurs before
113 the TB starts executing. */
622ed360 114 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
115 }
116 tb_phys_invalidate(tb, -1);
117 tb_free(tb);
118}
119
8a40a180
FB
120static TranslationBlock *tb_find_slow(target_ulong pc,
121 target_ulong cs_base,
c068688b 122 uint64_t flags)
8a40a180
FB
123{
124 TranslationBlock *tb, **ptb1;
8a40a180
FB
125 unsigned int h;
126 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
3b46e624 127
8a40a180 128 tb_invalidated_flag = 0;
3b46e624 129
8a40a180 130 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
3b46e624 131
8a40a180
FB
132 /* find translated block using physical mappings */
133 phys_pc = get_phys_addr_code(env, pc);
134 phys_page1 = phys_pc & TARGET_PAGE_MASK;
135 phys_page2 = -1;
136 h = tb_phys_hash_func(phys_pc);
137 ptb1 = &tb_phys_hash[h];
138 for(;;) {
139 tb = *ptb1;
140 if (!tb)
141 goto not_found;
5fafdf24 142 if (tb->pc == pc &&
8a40a180 143 tb->page_addr[0] == phys_page1 &&
5fafdf24 144 tb->cs_base == cs_base &&
8a40a180
FB
145 tb->flags == flags) {
146 /* check next page if needed */
147 if (tb->page_addr[1] != -1) {
5fafdf24 148 virt_page2 = (pc & TARGET_PAGE_MASK) +
8a40a180
FB
149 TARGET_PAGE_SIZE;
150 phys_page2 = get_phys_addr_code(env, virt_page2);
151 if (tb->page_addr[1] == phys_page2)
152 goto found;
153 } else {
154 goto found;
155 }
156 }
157 ptb1 = &tb->phys_hash_next;
158 }
159 not_found:
2e70f6ef
PB
160 /* if no translated code available, then translate it now */
161 tb = tb_gen_code(env, pc, cs_base, flags, 0);
3b46e624 162
8a40a180 163 found:
8a40a180
FB
164 /* we add the TB in the virtual pc hash table */
165 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
8a40a180
FB
166 return tb;
167}
168
169static inline TranslationBlock *tb_find_fast(void)
170{
171 TranslationBlock *tb;
172 target_ulong cs_base, pc;
6b917547 173 int flags;
8a40a180
FB
174
175 /* we record a subset of the CPU state. It will
176 always be the same before a given translated block
177 is executed. */
6b917547 178 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
bce61846 179 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
180 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
181 tb->flags != flags)) {
8a40a180
FB
182 tb = tb_find_slow(pc, cs_base, flags);
183 }
184 return tb;
185}
186
dde2367e
AL
187static CPUDebugExcpHandler *debug_excp_handler;
188
189CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
190{
191 CPUDebugExcpHandler *old_handler = debug_excp_handler;
192
193 debug_excp_handler = handler;
194 return old_handler;
195}
196
6e140f28
AL
197static void cpu_handle_debug_exception(CPUState *env)
198{
199 CPUWatchpoint *wp;
200
201 if (!env->watchpoint_hit)
c0ce998e 202 TAILQ_FOREACH(wp, &env->watchpoints, entry)
6e140f28 203 wp->flags &= ~BP_WATCHPOINT_HIT;
dde2367e
AL
204
205 if (debug_excp_handler)
206 debug_excp_handler(env);
6e140f28
AL
207}
208
7d13299d
FB
209/* main execution loop */
210
e4533c7a 211int cpu_exec(CPUState *env1)
7d13299d 212{
1057eaa7
PB
213#define DECLARE_HOST_REGS 1
214#include "hostregs_helper.h"
8a40a180 215 int ret, interrupt_request;
8a40a180 216 TranslationBlock *tb;
c27004ec 217 uint8_t *tc_ptr;
d5975363 218 unsigned long next_tb;
8c6939c0 219
bfed01fc
TS
220 if (cpu_halted(env1) == EXCP_HALTED)
221 return EXCP_HALTED;
5a1e3cfc 222
5fafdf24 223 cpu_single_env = env1;
6a00d601 224
7d13299d 225 /* first we save global registers */
1057eaa7
PB
226#define SAVE_HOST_REGS 1
227#include "hostregs_helper.h"
c27004ec 228 env = env1;
e4533c7a 229
0d1a29f9 230 env_to_regs();
ecb644f4 231#if defined(TARGET_I386)
9de5e440 232 /* put eflags in CPU temporary format */
fc2b4c48
FB
233 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
234 DF = 1 - (2 * ((env->eflags >> 10) & 1));
9de5e440 235 CC_OP = CC_OP_EFLAGS;
fc2b4c48 236 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
93ac68bc 237#elif defined(TARGET_SPARC)
e6e5906b
PB
238#elif defined(TARGET_M68K)
239 env->cc_op = CC_OP_FLAGS;
240 env->cc_dest = env->sr & 0xf;
241 env->cc_x = (env->sr >> 4) & 1;
ecb644f4
TS
242#elif defined(TARGET_ALPHA)
243#elif defined(TARGET_ARM)
244#elif defined(TARGET_PPC)
6af0bf9c 245#elif defined(TARGET_MIPS)
fdf9b3e8 246#elif defined(TARGET_SH4)
f1ccf904 247#elif defined(TARGET_CRIS)
fdf9b3e8 248 /* XXXXX */
e4533c7a
FB
249#else
250#error unsupported target CPU
251#endif
3fb2ded1 252 env->exception_index = -1;
9d27abd9 253
7d13299d 254 /* prepare setjmp context for exception handling */
3fb2ded1
FB
255 for(;;) {
256 if (setjmp(env->jmp_env) == 0) {
ee8b7021 257 env->current_tb = NULL;
3fb2ded1
FB
258 /* if an exception is pending, we execute it here */
259 if (env->exception_index >= 0) {
260 if (env->exception_index >= EXCP_INTERRUPT) {
261 /* exit request from the cpu execution loop */
262 ret = env->exception_index;
6e140f28
AL
263 if (ret == EXCP_DEBUG)
264 cpu_handle_debug_exception(env);
3fb2ded1
FB
265 break;
266 } else if (env->user_mode_only) {
267 /* if user mode only, we simulate a fake exception
9f083493 268 which will be handled outside the cpu execution
3fb2ded1 269 loop */
83479e77 270#if defined(TARGET_I386)
5fafdf24
TS
271 do_interrupt_user(env->exception_index,
272 env->exception_is_int,
273 env->error_code,
3fb2ded1 274 env->exception_next_eip);
eba01623
FB
275 /* successfully delivered */
276 env->old_exception = -1;
83479e77 277#endif
3fb2ded1
FB
278 ret = env->exception_index;
279 break;
280 } else {
83479e77 281#if defined(TARGET_I386)
3fb2ded1
FB
282 /* simulate a real cpu exception. On i386, it can
283 trigger new exceptions, but we do not handle
284 double or triple faults yet. */
5fafdf24
TS
285 do_interrupt(env->exception_index,
286 env->exception_is_int,
287 env->error_code,
d05e66d2 288 env->exception_next_eip, 0);
678dde13
TS
289 /* successfully delivered */
290 env->old_exception = -1;
ce09776b
FB
291#elif defined(TARGET_PPC)
292 do_interrupt(env);
6af0bf9c
FB
293#elif defined(TARGET_MIPS)
294 do_interrupt(env);
e95c8d51 295#elif defined(TARGET_SPARC)
f2bc7e7f 296 do_interrupt(env);
b5ff1b31
FB
297#elif defined(TARGET_ARM)
298 do_interrupt(env);
fdf9b3e8
FB
299#elif defined(TARGET_SH4)
300 do_interrupt(env);
eddf68a6
JM
301#elif defined(TARGET_ALPHA)
302 do_interrupt(env);
f1ccf904
TS
303#elif defined(TARGET_CRIS)
304 do_interrupt(env);
0633879f
PB
305#elif defined(TARGET_M68K)
306 do_interrupt(0);
83479e77 307#endif
3fb2ded1
FB
308 }
309 env->exception_index = -1;
5fafdf24 310 }
9df217a3
FB
311#ifdef USE_KQEMU
312 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
313 int ret;
a7812ae4 314 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
9df217a3
FB
315 ret = kqemu_cpu_exec(env);
316 /* put eflags in CPU temporary format */
317 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
318 DF = 1 - (2 * ((env->eflags >> 10) & 1));
319 CC_OP = CC_OP_EFLAGS;
320 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
321 if (ret == 1) {
322 /* exception */
323 longjmp(env->jmp_env, 1);
324 } else if (ret == 2) {
325 /* softmmu execution needed */
326 } else {
327 if (env->interrupt_request != 0) {
328 /* hardware interrupt will be executed just after */
329 } else {
330 /* otherwise, we restart */
331 longjmp(env->jmp_env, 1);
332 }
333 }
3fb2ded1 334 }
9df217a3
FB
335#endif
336
7ba1e619 337 if (kvm_enabled()) {
becfc390
AL
338 kvm_cpu_exec(env);
339 longjmp(env->jmp_env, 1);
7ba1e619
AL
340 }
341
b5fc09ae 342 next_tb = 0; /* force lookup of first TB */
3fb2ded1 343 for(;;) {
68a79315 344 interrupt_request = env->interrupt_request;
e1638bd8 345 if (unlikely(interrupt_request)) {
346 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
347 /* Mask out external interrupts for this step. */
348 interrupt_request &= ~(CPU_INTERRUPT_HARD |
349 CPU_INTERRUPT_FIQ |
350 CPU_INTERRUPT_SMI |
351 CPU_INTERRUPT_NMI);
352 }
6658ffb8
PB
353 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
354 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
355 env->exception_index = EXCP_DEBUG;
356 cpu_loop_exit();
357 }
a90b7318 358#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
f1ccf904 359 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
a90b7318
AZ
360 if (interrupt_request & CPU_INTERRUPT_HALT) {
361 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
362 env->halted = 1;
363 env->exception_index = EXCP_HLT;
364 cpu_loop_exit();
365 }
366#endif
68a79315 367#if defined(TARGET_I386)
db620f46
FB
368 if (env->hflags2 & HF2_GIF_MASK) {
369 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
370 !(env->hflags & HF_SMM_MASK)) {
371 svm_check_intercept(SVM_EXIT_SMI);
372 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
373 do_smm_enter();
374 next_tb = 0;
375 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
376 !(env->hflags2 & HF2_NMI_MASK)) {
377 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
378 env->hflags2 |= HF2_NMI_MASK;
379 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
380 next_tb = 0;
381 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
382 (((env->hflags2 & HF2_VINTR_MASK) &&
383 (env->hflags2 & HF2_HIF_MASK)) ||
384 (!(env->hflags2 & HF2_VINTR_MASK) &&
385 (env->eflags & IF_MASK &&
386 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
387 int intno;
388 svm_check_intercept(SVM_EXIT_INTR);
389 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
390 intno = cpu_get_pic_interrupt(env);
391 if (loglevel & CPU_LOG_TB_IN_ASM) {
392 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
393 }
394 do_interrupt(intno, 0, 0, 0, 1);
395 /* ensure that no TB jump will be modified as
396 the program flow was changed */
397 next_tb = 0;
0573fbfc 398#if !defined(CONFIG_USER_ONLY)
db620f46
FB
399 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
400 (env->eflags & IF_MASK) &&
401 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
402 int intno;
403 /* FIXME: this should respect TPR */
404 svm_check_intercept(SVM_EXIT_VINTR);
db620f46
FB
405 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
406 if (loglevel & CPU_LOG_TB_IN_ASM)
407 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
408 do_interrupt(intno, 0, 0, 0, 1);
d40c54d6 409 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
db620f46 410 next_tb = 0;
907a5b26 411#endif
db620f46 412 }
68a79315 413 }
ce09776b 414#elif defined(TARGET_PPC)
9fddaa0c
FB
415#if 0
416 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
417 cpu_ppc_reset(env);
418 }
419#endif
47103572 420 if (interrupt_request & CPU_INTERRUPT_HARD) {
e9df014c
JM
421 ppc_hw_interrupt(env);
422 if (env->pending_interrupts == 0)
423 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
b5fc09ae 424 next_tb = 0;
ce09776b 425 }
6af0bf9c
FB
426#elif defined(TARGET_MIPS)
427 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
24c7b0e3 428 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
6af0bf9c 429 (env->CP0_Status & (1 << CP0St_IE)) &&
24c7b0e3
TS
430 !(env->CP0_Status & (1 << CP0St_EXL)) &&
431 !(env->CP0_Status & (1 << CP0St_ERL)) &&
6af0bf9c
FB
432 !(env->hflags & MIPS_HFLAG_DM)) {
433 /* Raise it */
434 env->exception_index = EXCP_EXT_INTERRUPT;
435 env->error_code = 0;
436 do_interrupt(env);
b5fc09ae 437 next_tb = 0;
6af0bf9c 438 }
e95c8d51 439#elif defined(TARGET_SPARC)
66321a11
FB
440 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
441 (env->psret != 0)) {
442 int pil = env->interrupt_index & 15;
443 int type = env->interrupt_index & 0xf0;
444
445 if (((type == TT_EXTINT) &&
446 (pil == 15 || pil > env->psrpil)) ||
447 type != TT_EXTINT) {
448 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
f2bc7e7f
BS
449 env->exception_index = env->interrupt_index;
450 do_interrupt(env);
66321a11 451 env->interrupt_index = 0;
327ac2e7
BS
452#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
453 cpu_check_irqs(env);
454#endif
b5fc09ae 455 next_tb = 0;
66321a11 456 }
e95c8d51
FB
457 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
458 //do_interrupt(0, 0, 0, 0, 0);
459 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
a90b7318 460 }
b5ff1b31
FB
461#elif defined(TARGET_ARM)
462 if (interrupt_request & CPU_INTERRUPT_FIQ
463 && !(env->uncached_cpsr & CPSR_F)) {
464 env->exception_index = EXCP_FIQ;
465 do_interrupt(env);
b5fc09ae 466 next_tb = 0;
b5ff1b31 467 }
9ee6e8bb
PB
468 /* ARMv7-M interrupt return works by loading a magic value
469 into the PC. On real hardware the load causes the
470 return to occur. The qemu implementation performs the
471 jump normally, then does the exception return when the
472 CPU tries to execute code at the magic address.
473 This will cause the magic PC value to be pushed to
474 the stack if an interrupt occured at the wrong time.
475 We avoid this by disabling interrupts when
476 pc contains a magic address. */
b5ff1b31 477 if (interrupt_request & CPU_INTERRUPT_HARD
9ee6e8bb
PB
478 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
479 || !(env->uncached_cpsr & CPSR_I))) {
b5ff1b31
FB
480 env->exception_index = EXCP_IRQ;
481 do_interrupt(env);
b5fc09ae 482 next_tb = 0;
b5ff1b31 483 }
fdf9b3e8 484#elif defined(TARGET_SH4)
e96e2044
TS
485 if (interrupt_request & CPU_INTERRUPT_HARD) {
486 do_interrupt(env);
b5fc09ae 487 next_tb = 0;
e96e2044 488 }
eddf68a6
JM
489#elif defined(TARGET_ALPHA)
490 if (interrupt_request & CPU_INTERRUPT_HARD) {
491 do_interrupt(env);
b5fc09ae 492 next_tb = 0;
eddf68a6 493 }
f1ccf904 494#elif defined(TARGET_CRIS)
1b1a38b0
EI
495 if (interrupt_request & CPU_INTERRUPT_HARD
496 && (env->pregs[PR_CCS] & I_FLAG)) {
497 env->exception_index = EXCP_IRQ;
498 do_interrupt(env);
499 next_tb = 0;
500 }
501 if (interrupt_request & CPU_INTERRUPT_NMI
502 && (env->pregs[PR_CCS] & M_FLAG)) {
503 env->exception_index = EXCP_NMI;
f1ccf904 504 do_interrupt(env);
b5fc09ae 505 next_tb = 0;
f1ccf904 506 }
0633879f
PB
507#elif defined(TARGET_M68K)
508 if (interrupt_request & CPU_INTERRUPT_HARD
509 && ((env->sr & SR_I) >> SR_I_SHIFT)
510 < env->pending_level) {
511 /* Real hardware gets the interrupt vector via an
512 IACK cycle at this point. Current emulated
513 hardware doesn't rely on this, so we
514 provide/save the vector when the interrupt is
515 first signalled. */
516 env->exception_index = env->pending_vector;
517 do_interrupt(1);
b5fc09ae 518 next_tb = 0;
0633879f 519 }
68a79315 520#endif
9d05095e
FB
521 /* Don't use the cached interupt_request value,
522 do_interrupt may have updated the EXITTB flag. */
b5ff1b31 523 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
bf3e8bf1
FB
524 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
525 /* ensure that no TB jump will be modified as
526 the program flow was changed */
b5fc09ae 527 next_tb = 0;
bf3e8bf1 528 }
68a79315
FB
529 if (interrupt_request & CPU_INTERRUPT_EXIT) {
530 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
531 env->exception_index = EXCP_INTERRUPT;
532 cpu_loop_exit();
533 }
3fb2ded1 534 }
7d13299d 535#ifdef DEBUG_EXEC
b5ff1b31 536 if ((loglevel & CPU_LOG_TB_CPU)) {
3fb2ded1 537 /* restore flags in standard format */
ecb644f4
TS
538 regs_to_env();
539#if defined(TARGET_I386)
a7812ae4 540 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
7fe48483 541 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3fb2ded1 542 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
e4533c7a 543#elif defined(TARGET_ARM)
7fe48483 544 cpu_dump_state(env, logfile, fprintf, 0);
93ac68bc 545#elif defined(TARGET_SPARC)
3475187d 546 cpu_dump_state(env, logfile, fprintf, 0);
67867308 547#elif defined(TARGET_PPC)
7fe48483 548 cpu_dump_state(env, logfile, fprintf, 0);
e6e5906b
PB
549#elif defined(TARGET_M68K)
550 cpu_m68k_flush_flags(env, env->cc_op);
551 env->cc_op = CC_OP_FLAGS;
552 env->sr = (env->sr & 0xffe0)
553 | env->cc_dest | (env->cc_x << 4);
554 cpu_dump_state(env, logfile, fprintf, 0);
6af0bf9c
FB
555#elif defined(TARGET_MIPS)
556 cpu_dump_state(env, logfile, fprintf, 0);
fdf9b3e8
FB
557#elif defined(TARGET_SH4)
558 cpu_dump_state(env, logfile, fprintf, 0);
eddf68a6
JM
559#elif defined(TARGET_ALPHA)
560 cpu_dump_state(env, logfile, fprintf, 0);
f1ccf904
TS
561#elif defined(TARGET_CRIS)
562 cpu_dump_state(env, logfile, fprintf, 0);
e4533c7a 563#else
5fafdf24 564#error unsupported target CPU
e4533c7a 565#endif
3fb2ded1 566 }
7d13299d 567#endif
d5975363 568 spin_lock(&tb_lock);
8a40a180 569 tb = tb_find_fast();
d5975363
PB
570 /* Note: we do it here to avoid a gcc bug on Mac OS X when
571 doing it in tb_find_slow */
572 if (tb_invalidated_flag) {
573 /* as some TB could have been invalidated because
574 of memory exceptions while generating the code, we
575 must recompute the hash index here */
576 next_tb = 0;
2e70f6ef 577 tb_invalidated_flag = 0;
d5975363 578 }
9d27abd9 579#ifdef DEBUG_EXEC
c1135f61 580 if ((loglevel & CPU_LOG_EXEC)) {
c27004ec
FB
581 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
582 (long)tb->tc_ptr, tb->pc,
583 lookup_symbol(tb->pc));
3fb2ded1 584 }
9d27abd9 585#endif
8a40a180
FB
586 /* see if we can patch the calling TB. When the TB
587 spans two pages, we cannot safely do a direct
588 jump. */
c27004ec 589 {
b5fc09ae 590 if (next_tb != 0 &&
4d7a0880 591#ifdef USE_KQEMU
f32fc648
FB
592 (env->kqemu_enabled != 2) &&
593#endif
ec6338ba 594 tb->page_addr[1] == -1) {
b5fc09ae 595 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
3fb2ded1 596 }
c27004ec 597 }
d5975363 598 spin_unlock(&tb_lock);
83479e77 599 env->current_tb = tb;
55e8b85e 600
601 /* cpu_interrupt might be called while translating the
602 TB, but before it is linked into a potentially
603 infinite loop and becomes env->current_tb. Avoid
604 starting execution if there is a pending interrupt. */
605 if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
606 env->current_tb = NULL;
607
2e70f6ef
PB
608 while (env->current_tb) {
609 tc_ptr = tb->tc_ptr;
3fb2ded1 610 /* execute the generated code */
572a9d4a
BS
611#if defined(__sparc__) && !defined(HOST_SOLARIS)
612#undef env
2e70f6ef 613 env = cpu_single_env;
572a9d4a
BS
614#define env cpu_single_env
615#endif
2e70f6ef
PB
616 next_tb = tcg_qemu_tb_exec(tc_ptr);
617 env->current_tb = NULL;
618 if ((next_tb & 3) == 2) {
bf20dc07 619 /* Instruction counter expired. */
2e70f6ef
PB
620 int insns_left;
621 tb = (TranslationBlock *)(long)(next_tb & ~3);
622 /* Restore PC. */
622ed360 623 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
624 insns_left = env->icount_decr.u32;
625 if (env->icount_extra && insns_left >= 0) {
626 /* Refill decrementer and continue execution. */
627 env->icount_extra += insns_left;
628 if (env->icount_extra > 0xffff) {
629 insns_left = 0xffff;
630 } else {
631 insns_left = env->icount_extra;
632 }
633 env->icount_extra -= insns_left;
634 env->icount_decr.u16.low = insns_left;
635 } else {
636 if (insns_left > 0) {
637 /* Execute remaining instructions. */
638 cpu_exec_nocache(insns_left, tb);
639 }
640 env->exception_index = EXCP_INTERRUPT;
641 next_tb = 0;
642 cpu_loop_exit();
643 }
644 }
645 }
4cbf74b6
FB
646 /* reset soft MMU for next block (it can currently
647 only be set by a memory fault) */
f32fc648
FB
648#if defined(USE_KQEMU)
649#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
650 if (kqemu_is_ok(env) &&
651 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
652 cpu_loop_exit();
653 }
4cbf74b6 654#endif
50a518e3 655 } /* for(;;) */
3fb2ded1 656 } else {
0d1a29f9 657 env_to_regs();
7d13299d 658 }
3fb2ded1
FB
659 } /* for(;;) */
660
7d13299d 661
e4533c7a 662#if defined(TARGET_I386)
9de5e440 663 /* restore flags in standard format */
a7812ae4 664 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
e4533c7a 665#elif defined(TARGET_ARM)
b7bcbe95 666 /* XXX: Save/restore host fpu exception state?. */
93ac68bc 667#elif defined(TARGET_SPARC)
67867308 668#elif defined(TARGET_PPC)
e6e5906b
PB
669#elif defined(TARGET_M68K)
670 cpu_m68k_flush_flags(env, env->cc_op);
671 env->cc_op = CC_OP_FLAGS;
672 env->sr = (env->sr & 0xffe0)
673 | env->cc_dest | (env->cc_x << 4);
6af0bf9c 674#elif defined(TARGET_MIPS)
fdf9b3e8 675#elif defined(TARGET_SH4)
eddf68a6 676#elif defined(TARGET_ALPHA)
f1ccf904 677#elif defined(TARGET_CRIS)
fdf9b3e8 678 /* XXXXX */
e4533c7a
FB
679#else
680#error unsupported target CPU
681#endif
1057eaa7
PB
682
683 /* restore global registers */
1057eaa7
PB
684#include "hostregs_helper.h"
685
6a00d601 686 /* fail safe : never use cpu_single_env outside cpu_exec() */
5fafdf24 687 cpu_single_env = NULL;
7d13299d
FB
688 return ret;
689}
6dbad63e 690
fbf9eeb3
FB
691/* must only be called from the generated code as an exception can be
692 generated */
693void tb_invalidate_page_range(target_ulong start, target_ulong end)
694{
dc5d0b3d
FB
695 /* XXX: cannot enable it yet because it yields to MMU exception
696 where NIP != read address on PowerPC */
697#if 0
fbf9eeb3
FB
698 target_ulong phys_addr;
699 phys_addr = get_phys_addr_code(env, start);
700 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
dc5d0b3d 701#endif
fbf9eeb3
FB
702}
703
1a18c71b 704#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
e4533c7a 705
6dbad63e
FB
706void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
707{
708 CPUX86State *saved_env;
709
710 saved_env = env;
711 env = s;
a412ac57 712 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
a513fe19 713 selector &= 0xffff;
5fafdf24 714 cpu_x86_load_seg_cache(env, seg_reg, selector,
c27004ec 715 (selector << 4), 0xffff, 0);
a513fe19 716 } else {
5d97559d 717 helper_load_seg(seg_reg, selector);
a513fe19 718 }
6dbad63e
FB
719 env = saved_env;
720}
9de5e440 721
6f12a2a6 722void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
723{
724 CPUX86State *saved_env;
725
726 saved_env = env;
727 env = s;
3b46e624 728
6f12a2a6 729 helper_fsave(ptr, data32);
d0a1ffc9
FB
730
731 env = saved_env;
732}
733
6f12a2a6 734void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
735{
736 CPUX86State *saved_env;
737
738 saved_env = env;
739 env = s;
3b46e624 740
6f12a2a6 741 helper_frstor(ptr, data32);
d0a1ffc9
FB
742
743 env = saved_env;
744}
745
e4533c7a
FB
746#endif /* TARGET_I386 */
747
67b915a5
FB
748#if !defined(CONFIG_SOFTMMU)
749
3fb2ded1
FB
750#if defined(TARGET_I386)
751
b56dad1c 752/* 'pc' is the host PC at which the exception was raised. 'address' is
fd6ce8f6
FB
753 the effective address of the memory exception. 'is_write' is 1 if a
754 write caused the exception and otherwise 0'. 'old_set' is the
755 signal set which should be restored */
2b413144 756static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
5fafdf24 757 int is_write, sigset_t *old_set,
bf3e8bf1 758 void *puc)
9de5e440 759{
a513fe19
FB
760 TranslationBlock *tb;
761 int ret;
68a79315 762
83479e77
FB
763 if (cpu_single_env)
764 env = cpu_single_env; /* XXX: find a correct solution for multithread */
fd6ce8f6 765#if defined(DEBUG_SIGNAL)
5fafdf24 766 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
bf3e8bf1 767 pc, address, is_write, *(unsigned long *)old_set);
9de5e440 768#endif
25eb4484 769 /* XXX: locking issue */
53a5960a 770 if (is_write && page_unprotect(h2g(address), pc, puc)) {
fd6ce8f6
FB
771 return 1;
772 }
fbf9eeb3 773
3fb2ded1 774 /* see if it is an MMU fault */
6ebbf390 775 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
3fb2ded1
FB
776 if (ret < 0)
777 return 0; /* not an MMU fault */
778 if (ret == 0)
779 return 1; /* the MMU fault was handled without causing real CPU fault */
780 /* now we have a real cpu fault */
a513fe19
FB
781 tb = tb_find_pc(pc);
782 if (tb) {
9de5e440
FB
783 /* the PC is inside the translated code. It means that we have
784 a virtual CPU fault */
bf3e8bf1 785 cpu_restore_state(tb, env, pc, puc);
3fb2ded1 786 }
4cbf74b6 787 if (ret == 1) {
3fb2ded1 788#if 0
5fafdf24 789 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
4cbf74b6 790 env->eip, env->cr[2], env->error_code);
3fb2ded1 791#endif
4cbf74b6
FB
792 /* we restore the process signal mask as the sigreturn should
793 do it (XXX: use sigsetjmp) */
794 sigprocmask(SIG_SETMASK, old_set, NULL);
54ca9095 795 raise_exception_err(env->exception_index, env->error_code);
4cbf74b6
FB
796 } else {
797 /* activate soft MMU for this block */
3f337316 798 env->hflags |= HF_SOFTMMU_MASK;
fbf9eeb3 799 cpu_resume_from_signal(env, puc);
4cbf74b6 800 }
3fb2ded1
FB
801 /* never comes here */
802 return 1;
803}
804
e4533c7a 805#elif defined(TARGET_ARM)
3fb2ded1 806static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
807 int is_write, sigset_t *old_set,
808 void *puc)
3fb2ded1 809{
68016c62
FB
810 TranslationBlock *tb;
811 int ret;
812
813 if (cpu_single_env)
814 env = cpu_single_env; /* XXX: find a correct solution for multithread */
815#if defined(DEBUG_SIGNAL)
5fafdf24 816 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
817 pc, address, is_write, *(unsigned long *)old_set);
818#endif
9f0777ed 819 /* XXX: locking issue */
53a5960a 820 if (is_write && page_unprotect(h2g(address), pc, puc)) {
9f0777ed
FB
821 return 1;
822 }
68016c62 823 /* see if it is an MMU fault */
6ebbf390 824 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
825 if (ret < 0)
826 return 0; /* not an MMU fault */
827 if (ret == 0)
828 return 1; /* the MMU fault was handled without causing real CPU fault */
829 /* now we have a real cpu fault */
830 tb = tb_find_pc(pc);
831 if (tb) {
832 /* the PC is inside the translated code. It means that we have
833 a virtual CPU fault */
834 cpu_restore_state(tb, env, pc, puc);
835 }
836 /* we restore the process signal mask as the sigreturn should
837 do it (XXX: use sigsetjmp) */
838 sigprocmask(SIG_SETMASK, old_set, NULL);
839 cpu_loop_exit();
968c74da
AJ
840 /* never comes here */
841 return 1;
3fb2ded1 842}
93ac68bc
FB
843#elif defined(TARGET_SPARC)
844static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
845 int is_write, sigset_t *old_set,
846 void *puc)
93ac68bc 847{
68016c62
FB
848 TranslationBlock *tb;
849 int ret;
850
851 if (cpu_single_env)
852 env = cpu_single_env; /* XXX: find a correct solution for multithread */
853#if defined(DEBUG_SIGNAL)
5fafdf24 854 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
855 pc, address, is_write, *(unsigned long *)old_set);
856#endif
b453b70b 857 /* XXX: locking issue */
53a5960a 858 if (is_write && page_unprotect(h2g(address), pc, puc)) {
b453b70b
FB
859 return 1;
860 }
68016c62 861 /* see if it is an MMU fault */
6ebbf390 862 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
863 if (ret < 0)
864 return 0; /* not an MMU fault */
865 if (ret == 0)
866 return 1; /* the MMU fault was handled without causing real CPU fault */
867 /* now we have a real cpu fault */
868 tb = tb_find_pc(pc);
869 if (tb) {
870 /* the PC is inside the translated code. It means that we have
871 a virtual CPU fault */
872 cpu_restore_state(tb, env, pc, puc);
873 }
874 /* we restore the process signal mask as the sigreturn should
875 do it (XXX: use sigsetjmp) */
876 sigprocmask(SIG_SETMASK, old_set, NULL);
877 cpu_loop_exit();
968c74da
AJ
878 /* never comes here */
879 return 1;
93ac68bc 880}
67867308
FB
#elif defined (TARGET_PPC)
/* Handle a host SIGSEGV taken while executing translated PPC guest code
   (user-mode emulation only).
   pc       - host PC at the fault
   address  - host virtual address that faulted
   is_write - non-zero if the faulting access was a write
   old_set  - signal mask to restore before re-entering the CPU loop
   puc      - host ucontext, passed through to the restore helpers
   Returns 1 if the fault was fully handled, 0 if it is not an MMU fault
   and must be treated as a real host crash.  May not return at all:
   cpu_loop_exit()/cpu_resume_from_signal() jump back into the execution
   loop (see the "never comes here" tail below). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* Write to a page we write-protected to catch self-modifying code:
       unprotect it and retry.  h2g() maps the host fault address back
       to the guest address page_unprotect() expects. */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
930
931#elif defined(TARGET_M68K)
932static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
933 int is_write, sigset_t *old_set,
934 void *puc)
935{
936 TranslationBlock *tb;
937 int ret;
938
939 if (cpu_single_env)
940 env = cpu_single_env; /* XXX: find a correct solution for multithread */
941#if defined(DEBUG_SIGNAL)
5fafdf24 942 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
e6e5906b
PB
943 pc, address, is_write, *(unsigned long *)old_set);
944#endif
945 /* XXX: locking issue */
946 if (is_write && page_unprotect(address, pc, puc)) {
947 return 1;
948 }
949 /* see if it is an MMU fault */
6ebbf390 950 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
e6e5906b
PB
951 if (ret < 0)
952 return 0; /* not an MMU fault */
953 if (ret == 0)
954 return 1; /* the MMU fault was handled without causing real CPU fault */
955 /* now we have a real cpu fault */
956 tb = tb_find_pc(pc);
957 if (tb) {
958 /* the PC is inside the translated code. It means that we have
959 a virtual CPU fault */
960 cpu_restore_state(tb, env, pc, puc);
961 }
962 /* we restore the process signal mask as the sigreturn should
963 do it (XXX: use sigsetjmp) */
964 sigprocmask(SIG_SETMASK, old_set, NULL);
965 cpu_loop_exit();
966 /* never comes here */
67867308
FB
967 return 1;
968}
6af0bf9c
FB
969
#elif defined (TARGET_MIPS)
/* Handle a host SIGSEGV taken while executing translated MIPS guest
   code (user-mode emulation only).  Returns 1 if the fault was handled,
   0 if it is not an MMU fault.  May not return: cpu_loop_exit() and
   cpu_resume_from_signal() jump back into the execution loop. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* Write fault on a page we write-protected to detect self-modifying
       code: unprotect (h2g() converts host -> guest address) and retry. */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* ret == 1: deliver the fault to the guest; otherwise re-execute the
       block with the soft MMU.  NOTE(review): semantics inferred from the
       two branches - confirm against cpu_mips_handle_mmu_fault(). */
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
1019
fdf9b3e8
FB
1020#elif defined (TARGET_SH4)
1021static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1022 int is_write, sigset_t *old_set,
1023 void *puc)
1024{
1025 TranslationBlock *tb;
1026 int ret;
3b46e624 1027
fdf9b3e8
FB
1028 if (cpu_single_env)
1029 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1030#if defined(DEBUG_SIGNAL)
5fafdf24 1031 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
fdf9b3e8
FB
1032 pc, address, is_write, *(unsigned long *)old_set);
1033#endif
1034 /* XXX: locking issue */
1035 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1036 return 1;
1037 }
1038
1039 /* see if it is an MMU fault */
6ebbf390 1040 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
fdf9b3e8
FB
1041 if (ret < 0)
1042 return 0; /* not an MMU fault */
1043 if (ret == 0)
1044 return 1; /* the MMU fault was handled without causing real CPU fault */
1045
1046 /* now we have a real cpu fault */
eddf68a6
JM
1047 tb = tb_find_pc(pc);
1048 if (tb) {
1049 /* the PC is inside the translated code. It means that we have
1050 a virtual CPU fault */
1051 cpu_restore_state(tb, env, pc, puc);
1052 }
1053#if 0
5fafdf24 1054 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
eddf68a6
JM
1055 env->nip, env->error_code, tb);
1056#endif
1057 /* we restore the process signal mask as the sigreturn should
1058 do it (XXX: use sigsetjmp) */
1059 sigprocmask(SIG_SETMASK, old_set, NULL);
1060 cpu_loop_exit();
1061 /* never comes here */
1062 return 1;
1063}
1064
1065#elif defined (TARGET_ALPHA)
1066static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1067 int is_write, sigset_t *old_set,
1068 void *puc)
1069{
1070 TranslationBlock *tb;
1071 int ret;
3b46e624 1072
eddf68a6
JM
1073 if (cpu_single_env)
1074 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1075#if defined(DEBUG_SIGNAL)
5fafdf24 1076 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
eddf68a6
JM
1077 pc, address, is_write, *(unsigned long *)old_set);
1078#endif
1079 /* XXX: locking issue */
1080 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1081 return 1;
1082 }
1083
1084 /* see if it is an MMU fault */
6ebbf390 1085 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
eddf68a6
JM
1086 if (ret < 0)
1087 return 0; /* not an MMU fault */
1088 if (ret == 0)
1089 return 1; /* the MMU fault was handled without causing real CPU fault */
1090
1091 /* now we have a real cpu fault */
fdf9b3e8
FB
1092 tb = tb_find_pc(pc);
1093 if (tb) {
1094 /* the PC is inside the translated code. It means that we have
1095 a virtual CPU fault */
1096 cpu_restore_state(tb, env, pc, puc);
1097 }
fdf9b3e8 1098#if 0
5fafdf24 1099 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
fdf9b3e8
FB
1100 env->nip, env->error_code, tb);
1101#endif
1102 /* we restore the process signal mask as the sigreturn should
1103 do it (XXX: use sigsetjmp) */
355fb23d
PB
1104 sigprocmask(SIG_SETMASK, old_set, NULL);
1105 cpu_loop_exit();
fdf9b3e8
FB
1106 /* never comes here */
1107 return 1;
1108}
f1ccf904
TS
#elif defined (TARGET_CRIS)
/* Handle a host SIGSEGV taken while executing translated CRIS guest
   code (user-mode emulation only).  Returns 1 if the fault was handled
   (write to a protected code page, or a guest MMU fault), 0 if it is
   not an MMU fault.  Does not return when a guest exception must be
   raised: cpu_loop_exit() jumps back into the execution loop. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* Write fault on a write-protected code page: unprotect and retry.
       h2g() converts the host address to the guest address expected by
       page_unprotect(). */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1149
e4533c7a
FB
1150#else
1151#error unsupported target CPU
1152#endif
9de5e440 1153
2b413144
FB
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

/* Darwin keeps the machine context behind a pointer and uses different
   member names than glibc, hence the per-OS accessor macros. */
# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

/* Host-side signal handler entry for i386 hosts: extract the faulting
   PC from the ucontext and, for page faults (trap number 0xe), the
   write/read bit from the error code, then defer to the target-specific
   handle_cpu_signal().  Returns its result (1 = handled, 0 = not ours). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP EIP
#define REG_ERR ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    /* Bit 1 of the x86 page-fault error code is the write/read flag. */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1189
bc51c5c9
FB
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc) _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc) QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

/* Host-side signal handler entry for x86_64 hosts.  Same scheme as the
   i386 variant: fetch RIP, and for page faults (trap 0xe) extract the
   write bit (bit 1) from the error code, then defer to the
   target-specific handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1220
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context) REG_sig(link, context) /* Link register */
# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context) REG_sig(dar, context)
# define DSISR_sig(context) REG_sig(dsisr, context)
# define TRAP_sig(context) REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context) REG_sig(ctr, context)
# define XER_sig(context) REG_sig(xer, context) /* Link register */
# define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
# define CR_sig(context) REG_sig(cr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

/* Host-side signal handler entry for PowerPC hosts: fetch the faulting
   PC from the context and derive is_write from the DSISR bits (skipped
   for instruction-access faults, trap 0x400), then defer to the
   target-specific handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1293
2f87c607
FB
1294#elif defined(__alpha__)
1295
5fafdf24 1296int cpu_signal_handler(int host_signum, void *pinfo,
2f87c607
FB
1297 void *puc)
1298{
5a7b542b 1299 siginfo_t *info = pinfo;
2f87c607
FB
1300 struct ucontext *uc = puc;
1301 uint32_t *pc = uc->uc_mcontext.sc_pc;
1302 uint32_t insn = *pc;
1303 int is_write = 0;
1304
8c6939c0 1305 /* XXX: need kernel patch to get write flag faster */
2f87c607
FB
1306 switch (insn >> 26) {
1307 case 0x0d: // stw
1308 case 0x0e: // stb
1309 case 0x0f: // stq_u
1310 case 0x24: // stf
1311 case 0x25: // stg
1312 case 0x26: // sts
1313 case 0x27: // stt
1314 case 0x2c: // stl
1315 case 0x2d: // stq
1316 case 0x2e: // stl_c
1317 case 0x2f: // stq_c
1318 is_write = 1;
1319 }
1320
5fafdf24 1321 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1322 is_write, &uc->uc_sigmask, puc);
2f87c607 1323}
8c6939c0
FB
#elif defined(__sparc__)

/* Host-side signal handler entry for SPARC hosts.  The location of the
   PC and signal mask depends on ABI and OS (32-bit/Solaris lay them out
   after the siginfo; 64-bit Linux and OpenBSD use a sigcontext).  The
   write flag is recovered by decoding the faulting store instruction. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    /* NOTE(review): assumes the register window follows siginfo_t in
       memory on these ABIs - no standard glibc define exists (see XXX). */
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    /* Format-3 (memory) instructions have op == 3 in the top two bits;
       the op3 field selects the store variant. */
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
1368
#elif defined(__arm__)

/* Host-side signal handler entry for ARM hosts.  Only the PC is
   recovered (its ucontext field name changed between glibc versions);
   the write flag is not computed yet (see XXX), so is_write is 0. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1390
38e584a0
FB
#elif defined(__mc68000)

/* Host-side signal handler entry for m68k hosts.  gregs[16] holds the
   PC; the write flag is not computed yet (see XXX), so is_write is 0. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1408
b8076a74
FB
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

/* Host-side signal handler entry for IA-64 hosts.  The write flag is
   taken from the Interruption Status Register word stored in siginfo
   (valid only when the __ISR_VALID flag is set). */
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33: */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1442
90cb9493
FB
#elif defined(__s390__)

/* Host-side signal handler entry for s390 hosts.  The PC comes from
   the PSW address; the write flag is not computed yet (see XXX). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1459
#elif defined(__mips__)

/* Host-side signal handler entry for MIPS hosts.  The write flag is
   not computed yet (see XXX), so is_write is always 0. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1475
f54b3f92
AJ
#elif defined(__hppa__)

/* Host-side signal handler entry for HPPA hosts.  The PC comes from
   the front of the instruction-address offset queue; the write flag is
   not computed yet (see FIXME). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1493
9de5e440 1494#else
2b413144 1495
3fb2ded1 1496#error host CPU specific signal handler needed
2b413144 1497
9de5e440 1498#endif
67b915a5
FB
1499
1500#endif /* !defined(CONFIG_SOFTMMU) */