/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
e4533c7a 20#include "config.h"
93ac68bc 21#include "exec.h"
956034d7 22#include "disas.h"
7cb69cae 23#include "tcg.h"
7ba1e619 24#include "kvm.h"
7d13299d 25
fbf9eeb3
FB
26#if !defined(CONFIG_SOFTMMU)
27#undef EAX
28#undef ECX
29#undef EDX
30#undef EBX
31#undef ESP
32#undef EBP
33#undef ESI
34#undef EDI
35#undef EIP
36#include <signal.h>
84778508 37#ifdef __linux__
fbf9eeb3
FB
38#include <sys/ucontext.h>
39#endif
84778508 40#endif
fbf9eeb3 41
572a9d4a
BS
42#if defined(__sparc__) && !defined(HOST_SOLARIS)
43// Work around ugly bugs in glibc that mangle global register contents
44#undef env
45#define env cpu_single_env
46#endif
47
36bdbe54
FB
48int tb_invalidated_flag;
49
dc99065b 50//#define DEBUG_EXEC
9de5e440 51//#define DEBUG_SIGNAL
7d13299d 52
e4533c7a
FB
53void cpu_loop_exit(void)
54{
bfed01fc
TS
55 /* NOTE: the register at this point must be saved by hand because
56 longjmp restore them */
57 regs_to_env();
e4533c7a
FB
58 longjmp(env->jmp_env, 1);
59}
bfed01fc 60
fbf9eeb3
FB
61/* exit the current TB from a signal handler. The host registers are
62 restored in a state compatible with the CPU emulator
63 */
5fafdf24 64void cpu_resume_from_signal(CPUState *env1, void *puc)
fbf9eeb3
FB
65{
66#if !defined(CONFIG_SOFTMMU)
84778508 67#ifdef __linux__
fbf9eeb3 68 struct ucontext *uc = puc;
84778508
BS
69#elif defined(__OpenBSD__)
70 struct sigcontext *uc = puc;
71#endif
fbf9eeb3
FB
72#endif
73
74 env = env1;
75
76 /* XXX: restore cpu registers saved in host registers */
77
78#if !defined(CONFIG_SOFTMMU)
79 if (puc) {
80 /* XXX: use siglongjmp ? */
84778508 81#ifdef __linux__
fbf9eeb3 82 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
84778508
BS
83#elif defined(__OpenBSD__)
84 sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
85#endif
fbf9eeb3
FB
86 }
87#endif
9a3ea654 88 env->exception_index = -1;
fbf9eeb3
FB
89 longjmp(env->jmp_env, 1);
90}
91
2e70f6ef
PB
92/* Execute the code without caching the generated code. An interpreter
93 could be used if available. */
94static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
95{
96 unsigned long next_tb;
97 TranslationBlock *tb;
98
99 /* Should never happen.
100 We only end up here when an existing TB is too long. */
101 if (max_cycles > CF_COUNT_MASK)
102 max_cycles = CF_COUNT_MASK;
103
104 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
105 max_cycles);
106 env->current_tb = tb;
107 /* execute the generated code */
108 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
109
110 if ((next_tb & 3) == 2) {
111 /* Restore PC. This may happen if async event occurs before
112 the TB starts executing. */
622ed360 113 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
114 }
115 tb_phys_invalidate(tb, -1);
116 tb_free(tb);
117}
118
8a40a180
FB
119static TranslationBlock *tb_find_slow(target_ulong pc,
120 target_ulong cs_base,
c068688b 121 uint64_t flags)
8a40a180
FB
122{
123 TranslationBlock *tb, **ptb1;
8a40a180
FB
124 unsigned int h;
125 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
3b46e624 126
8a40a180 127 tb_invalidated_flag = 0;
3b46e624 128
8a40a180 129 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
3b46e624 130
8a40a180
FB
131 /* find translated block using physical mappings */
132 phys_pc = get_phys_addr_code(env, pc);
133 phys_page1 = phys_pc & TARGET_PAGE_MASK;
134 phys_page2 = -1;
135 h = tb_phys_hash_func(phys_pc);
136 ptb1 = &tb_phys_hash[h];
137 for(;;) {
138 tb = *ptb1;
139 if (!tb)
140 goto not_found;
5fafdf24 141 if (tb->pc == pc &&
8a40a180 142 tb->page_addr[0] == phys_page1 &&
5fafdf24 143 tb->cs_base == cs_base &&
8a40a180
FB
144 tb->flags == flags) {
145 /* check next page if needed */
146 if (tb->page_addr[1] != -1) {
5fafdf24 147 virt_page2 = (pc & TARGET_PAGE_MASK) +
8a40a180
FB
148 TARGET_PAGE_SIZE;
149 phys_page2 = get_phys_addr_code(env, virt_page2);
150 if (tb->page_addr[1] == phys_page2)
151 goto found;
152 } else {
153 goto found;
154 }
155 }
156 ptb1 = &tb->phys_hash_next;
157 }
158 not_found:
2e70f6ef
PB
159 /* if no translated code available, then translate it now */
160 tb = tb_gen_code(env, pc, cs_base, flags, 0);
3b46e624 161
8a40a180 162 found:
8a40a180
FB
163 /* we add the TB in the virtual pc hash table */
164 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
8a40a180
FB
165 return tb;
166}
167
168static inline TranslationBlock *tb_find_fast(void)
169{
170 TranslationBlock *tb;
171 target_ulong cs_base, pc;
6b917547 172 int flags;
8a40a180
FB
173
174 /* we record a subset of the CPU state. It will
175 always be the same before a given translated block
176 is executed. */
6b917547 177 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
bce61846 178 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
179 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
180 tb->flags != flags)) {
8a40a180
FB
181 tb = tb_find_slow(pc, cs_base, flags);
182 }
183 return tb;
184}
185
dde2367e
AL
186static CPUDebugExcpHandler *debug_excp_handler;
187
188CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
189{
190 CPUDebugExcpHandler *old_handler = debug_excp_handler;
191
192 debug_excp_handler = handler;
193 return old_handler;
194}
195
6e140f28
AL
196static void cpu_handle_debug_exception(CPUState *env)
197{
198 CPUWatchpoint *wp;
199
200 if (!env->watchpoint_hit)
c0ce998e 201 TAILQ_FOREACH(wp, &env->watchpoints, entry)
6e140f28 202 wp->flags &= ~BP_WATCHPOINT_HIT;
dde2367e
AL
203
204 if (debug_excp_handler)
205 debug_excp_handler(env);
6e140f28
AL
206}
207
7d13299d
FB
208/* main execution loop */
209
e4533c7a 210int cpu_exec(CPUState *env1)
7d13299d 211{
1057eaa7
PB
212#define DECLARE_HOST_REGS 1
213#include "hostregs_helper.h"
8a40a180 214 int ret, interrupt_request;
8a40a180 215 TranslationBlock *tb;
c27004ec 216 uint8_t *tc_ptr;
d5975363 217 unsigned long next_tb;
8c6939c0 218
bfed01fc
TS
219 if (cpu_halted(env1) == EXCP_HALTED)
220 return EXCP_HALTED;
5a1e3cfc 221
5fafdf24 222 cpu_single_env = env1;
6a00d601 223
7d13299d 224 /* first we save global registers */
1057eaa7
PB
225#define SAVE_HOST_REGS 1
226#include "hostregs_helper.h"
c27004ec 227 env = env1;
e4533c7a 228
0d1a29f9 229 env_to_regs();
ecb644f4 230#if defined(TARGET_I386)
9de5e440 231 /* put eflags in CPU temporary format */
fc2b4c48
FB
232 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
233 DF = 1 - (2 * ((env->eflags >> 10) & 1));
9de5e440 234 CC_OP = CC_OP_EFLAGS;
fc2b4c48 235 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
93ac68bc 236#elif defined(TARGET_SPARC)
e6e5906b
PB
237#elif defined(TARGET_M68K)
238 env->cc_op = CC_OP_FLAGS;
239 env->cc_dest = env->sr & 0xf;
240 env->cc_x = (env->sr >> 4) & 1;
ecb644f4
TS
241#elif defined(TARGET_ALPHA)
242#elif defined(TARGET_ARM)
243#elif defined(TARGET_PPC)
6af0bf9c 244#elif defined(TARGET_MIPS)
fdf9b3e8 245#elif defined(TARGET_SH4)
f1ccf904 246#elif defined(TARGET_CRIS)
fdf9b3e8 247 /* XXXXX */
e4533c7a
FB
248#else
249#error unsupported target CPU
250#endif
3fb2ded1 251 env->exception_index = -1;
9d27abd9 252
7d13299d 253 /* prepare setjmp context for exception handling */
3fb2ded1
FB
254 for(;;) {
255 if (setjmp(env->jmp_env) == 0) {
ee8b7021 256 env->current_tb = NULL;
3fb2ded1
FB
257 /* if an exception is pending, we execute it here */
258 if (env->exception_index >= 0) {
259 if (env->exception_index >= EXCP_INTERRUPT) {
260 /* exit request from the cpu execution loop */
261 ret = env->exception_index;
6e140f28
AL
262 if (ret == EXCP_DEBUG)
263 cpu_handle_debug_exception(env);
3fb2ded1 264 break;
72d239ed
AJ
265 } else {
266#if defined(CONFIG_USER_ONLY)
3fb2ded1 267 /* if user mode only, we simulate a fake exception
9f083493 268 which will be handled outside the cpu execution
3fb2ded1 269 loop */
83479e77 270#if defined(TARGET_I386)
5fafdf24
TS
271 do_interrupt_user(env->exception_index,
272 env->exception_is_int,
273 env->error_code,
3fb2ded1 274 env->exception_next_eip);
eba01623
FB
275 /* successfully delivered */
276 env->old_exception = -1;
83479e77 277#endif
3fb2ded1
FB
278 ret = env->exception_index;
279 break;
72d239ed 280#else
83479e77 281#if defined(TARGET_I386)
3fb2ded1
FB
282 /* simulate a real cpu exception. On i386, it can
283 trigger new exceptions, but we do not handle
284 double or triple faults yet. */
5fafdf24
TS
285 do_interrupt(env->exception_index,
286 env->exception_is_int,
287 env->error_code,
d05e66d2 288 env->exception_next_eip, 0);
678dde13
TS
289 /* successfully delivered */
290 env->old_exception = -1;
ce09776b
FB
291#elif defined(TARGET_PPC)
292 do_interrupt(env);
6af0bf9c
FB
293#elif defined(TARGET_MIPS)
294 do_interrupt(env);
e95c8d51 295#elif defined(TARGET_SPARC)
f2bc7e7f 296 do_interrupt(env);
b5ff1b31
FB
297#elif defined(TARGET_ARM)
298 do_interrupt(env);
fdf9b3e8
FB
299#elif defined(TARGET_SH4)
300 do_interrupt(env);
eddf68a6
JM
301#elif defined(TARGET_ALPHA)
302 do_interrupt(env);
f1ccf904
TS
303#elif defined(TARGET_CRIS)
304 do_interrupt(env);
0633879f
PB
305#elif defined(TARGET_M68K)
306 do_interrupt(0);
72d239ed 307#endif
83479e77 308#endif
3fb2ded1
FB
309 }
310 env->exception_index = -1;
5fafdf24 311 }
9df217a3 312#ifdef USE_KQEMU
be214e6c 313 if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
9df217a3 314 int ret;
a7812ae4 315 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
9df217a3
FB
316 ret = kqemu_cpu_exec(env);
317 /* put eflags in CPU temporary format */
318 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
319 DF = 1 - (2 * ((env->eflags >> 10) & 1));
320 CC_OP = CC_OP_EFLAGS;
321 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
322 if (ret == 1) {
323 /* exception */
324 longjmp(env->jmp_env, 1);
325 } else if (ret == 2) {
326 /* softmmu execution needed */
327 } else {
be214e6c 328 if (env->interrupt_request != 0 || env->exit_request != 0) {
9df217a3
FB
329 /* hardware interrupt will be executed just after */
330 } else {
331 /* otherwise, we restart */
332 longjmp(env->jmp_env, 1);
333 }
334 }
3fb2ded1 335 }
9df217a3
FB
336#endif
337
7ba1e619 338 if (kvm_enabled()) {
becfc390
AL
339 kvm_cpu_exec(env);
340 longjmp(env->jmp_env, 1);
7ba1e619
AL
341 }
342
b5fc09ae 343 next_tb = 0; /* force lookup of first TB */
3fb2ded1 344 for(;;) {
68a79315 345 interrupt_request = env->interrupt_request;
e1638bd8 346 if (unlikely(interrupt_request)) {
347 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
348 /* Mask out external interrupts for this step. */
349 interrupt_request &= ~(CPU_INTERRUPT_HARD |
350 CPU_INTERRUPT_FIQ |
351 CPU_INTERRUPT_SMI |
352 CPU_INTERRUPT_NMI);
353 }
6658ffb8
PB
354 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
355 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
356 env->exception_index = EXCP_DEBUG;
357 cpu_loop_exit();
358 }
a90b7318 359#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
f1ccf904 360 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
a90b7318
AZ
361 if (interrupt_request & CPU_INTERRUPT_HALT) {
362 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
363 env->halted = 1;
364 env->exception_index = EXCP_HLT;
365 cpu_loop_exit();
366 }
367#endif
68a79315 368#if defined(TARGET_I386)
db620f46
FB
369 if (env->hflags2 & HF2_GIF_MASK) {
370 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
371 !(env->hflags & HF_SMM_MASK)) {
372 svm_check_intercept(SVM_EXIT_SMI);
373 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
374 do_smm_enter();
375 next_tb = 0;
376 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
377 !(env->hflags2 & HF2_NMI_MASK)) {
378 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
379 env->hflags2 |= HF2_NMI_MASK;
380 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
381 next_tb = 0;
382 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
383 (((env->hflags2 & HF2_VINTR_MASK) &&
384 (env->hflags2 & HF2_HIF_MASK)) ||
385 (!(env->hflags2 & HF2_VINTR_MASK) &&
386 (env->eflags & IF_MASK &&
387 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
388 int intno;
389 svm_check_intercept(SVM_EXIT_INTR);
390 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
391 intno = cpu_get_pic_interrupt(env);
93fcfe39 392 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
db620f46
FB
393 do_interrupt(intno, 0, 0, 0, 1);
394 /* ensure that no TB jump will be modified as
395 the program flow was changed */
396 next_tb = 0;
0573fbfc 397#if !defined(CONFIG_USER_ONLY)
db620f46
FB
398 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
399 (env->eflags & IF_MASK) &&
400 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
401 int intno;
402 /* FIXME: this should respect TPR */
403 svm_check_intercept(SVM_EXIT_VINTR);
db620f46 404 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
93fcfe39 405 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
db620f46 406 do_interrupt(intno, 0, 0, 0, 1);
d40c54d6 407 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
db620f46 408 next_tb = 0;
907a5b26 409#endif
db620f46 410 }
68a79315 411 }
ce09776b 412#elif defined(TARGET_PPC)
9fddaa0c
FB
413#if 0
414 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
415 cpu_ppc_reset(env);
416 }
417#endif
47103572 418 if (interrupt_request & CPU_INTERRUPT_HARD) {
e9df014c
JM
419 ppc_hw_interrupt(env);
420 if (env->pending_interrupts == 0)
421 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
b5fc09ae 422 next_tb = 0;
ce09776b 423 }
6af0bf9c
FB
424#elif defined(TARGET_MIPS)
425 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
24c7b0e3 426 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
6af0bf9c 427 (env->CP0_Status & (1 << CP0St_IE)) &&
24c7b0e3
TS
428 !(env->CP0_Status & (1 << CP0St_EXL)) &&
429 !(env->CP0_Status & (1 << CP0St_ERL)) &&
6af0bf9c
FB
430 !(env->hflags & MIPS_HFLAG_DM)) {
431 /* Raise it */
432 env->exception_index = EXCP_EXT_INTERRUPT;
433 env->error_code = 0;
434 do_interrupt(env);
b5fc09ae 435 next_tb = 0;
6af0bf9c 436 }
e95c8d51 437#elif defined(TARGET_SPARC)
66321a11
FB
438 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
439 (env->psret != 0)) {
440 int pil = env->interrupt_index & 15;
441 int type = env->interrupt_index & 0xf0;
442
443 if (((type == TT_EXTINT) &&
444 (pil == 15 || pil > env->psrpil)) ||
445 type != TT_EXTINT) {
446 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
f2bc7e7f
BS
447 env->exception_index = env->interrupt_index;
448 do_interrupt(env);
66321a11 449 env->interrupt_index = 0;
327ac2e7
BS
450#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
451 cpu_check_irqs(env);
452#endif
b5fc09ae 453 next_tb = 0;
66321a11 454 }
e95c8d51
FB
455 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
456 //do_interrupt(0, 0, 0, 0, 0);
457 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
a90b7318 458 }
b5ff1b31
FB
459#elif defined(TARGET_ARM)
460 if (interrupt_request & CPU_INTERRUPT_FIQ
461 && !(env->uncached_cpsr & CPSR_F)) {
462 env->exception_index = EXCP_FIQ;
463 do_interrupt(env);
b5fc09ae 464 next_tb = 0;
b5ff1b31 465 }
9ee6e8bb
PB
466 /* ARMv7-M interrupt return works by loading a magic value
467 into the PC. On real hardware the load causes the
468 return to occur. The qemu implementation performs the
469 jump normally, then does the exception return when the
470 CPU tries to execute code at the magic address.
471 This will cause the magic PC value to be pushed to
472 the stack if an interrupt occured at the wrong time.
473 We avoid this by disabling interrupts when
474 pc contains a magic address. */
b5ff1b31 475 if (interrupt_request & CPU_INTERRUPT_HARD
9ee6e8bb
PB
476 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
477 || !(env->uncached_cpsr & CPSR_I))) {
b5ff1b31
FB
478 env->exception_index = EXCP_IRQ;
479 do_interrupt(env);
b5fc09ae 480 next_tb = 0;
b5ff1b31 481 }
fdf9b3e8 482#elif defined(TARGET_SH4)
e96e2044
TS
483 if (interrupt_request & CPU_INTERRUPT_HARD) {
484 do_interrupt(env);
b5fc09ae 485 next_tb = 0;
e96e2044 486 }
eddf68a6
JM
487#elif defined(TARGET_ALPHA)
488 if (interrupt_request & CPU_INTERRUPT_HARD) {
489 do_interrupt(env);
b5fc09ae 490 next_tb = 0;
eddf68a6 491 }
f1ccf904 492#elif defined(TARGET_CRIS)
1b1a38b0
EI
493 if (interrupt_request & CPU_INTERRUPT_HARD
494 && (env->pregs[PR_CCS] & I_FLAG)) {
495 env->exception_index = EXCP_IRQ;
496 do_interrupt(env);
497 next_tb = 0;
498 }
499 if (interrupt_request & CPU_INTERRUPT_NMI
500 && (env->pregs[PR_CCS] & M_FLAG)) {
501 env->exception_index = EXCP_NMI;
f1ccf904 502 do_interrupt(env);
b5fc09ae 503 next_tb = 0;
f1ccf904 504 }
0633879f
PB
505#elif defined(TARGET_M68K)
506 if (interrupt_request & CPU_INTERRUPT_HARD
507 && ((env->sr & SR_I) >> SR_I_SHIFT)
508 < env->pending_level) {
509 /* Real hardware gets the interrupt vector via an
510 IACK cycle at this point. Current emulated
511 hardware doesn't rely on this, so we
512 provide/save the vector when the interrupt is
513 first signalled. */
514 env->exception_index = env->pending_vector;
515 do_interrupt(1);
b5fc09ae 516 next_tb = 0;
0633879f 517 }
68a79315 518#endif
9d05095e
FB
519 /* Don't use the cached interupt_request value,
520 do_interrupt may have updated the EXITTB flag. */
b5ff1b31 521 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
bf3e8bf1
FB
522 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
523 /* ensure that no TB jump will be modified as
524 the program flow was changed */
b5fc09ae 525 next_tb = 0;
bf3e8bf1 526 }
be214e6c
AJ
527 }
528 if (unlikely(env->exit_request)) {
529 env->exit_request = 0;
530 env->exception_index = EXCP_INTERRUPT;
531 cpu_loop_exit();
3fb2ded1 532 }
7d13299d 533#ifdef DEBUG_EXEC
8fec2b8c 534 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
3fb2ded1 535 /* restore flags in standard format */
ecb644f4
TS
536 regs_to_env();
537#if defined(TARGET_I386)
a7812ae4 538 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
93fcfe39 539 log_cpu_state(env, X86_DUMP_CCOP);
3fb2ded1 540 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
e4533c7a 541#elif defined(TARGET_ARM)
93fcfe39 542 log_cpu_state(env, 0);
93ac68bc 543#elif defined(TARGET_SPARC)
93fcfe39 544 log_cpu_state(env, 0);
67867308 545#elif defined(TARGET_PPC)
93fcfe39 546 log_cpu_state(env, 0);
e6e5906b
PB
547#elif defined(TARGET_M68K)
548 cpu_m68k_flush_flags(env, env->cc_op);
549 env->cc_op = CC_OP_FLAGS;
550 env->sr = (env->sr & 0xffe0)
551 | env->cc_dest | (env->cc_x << 4);
93fcfe39 552 log_cpu_state(env, 0);
6af0bf9c 553#elif defined(TARGET_MIPS)
93fcfe39 554 log_cpu_state(env, 0);
fdf9b3e8 555#elif defined(TARGET_SH4)
93fcfe39 556 log_cpu_state(env, 0);
eddf68a6 557#elif defined(TARGET_ALPHA)
93fcfe39 558 log_cpu_state(env, 0);
f1ccf904 559#elif defined(TARGET_CRIS)
93fcfe39 560 log_cpu_state(env, 0);
e4533c7a 561#else
5fafdf24 562#error unsupported target CPU
e4533c7a 563#endif
3fb2ded1 564 }
7d13299d 565#endif
d5975363 566 spin_lock(&tb_lock);
8a40a180 567 tb = tb_find_fast();
d5975363
PB
568 /* Note: we do it here to avoid a gcc bug on Mac OS X when
569 doing it in tb_find_slow */
570 if (tb_invalidated_flag) {
571 /* as some TB could have been invalidated because
572 of memory exceptions while generating the code, we
573 must recompute the hash index here */
574 next_tb = 0;
2e70f6ef 575 tb_invalidated_flag = 0;
d5975363 576 }
9d27abd9 577#ifdef DEBUG_EXEC
93fcfe39
AL
578 qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
579 (long)tb->tc_ptr, tb->pc,
580 lookup_symbol(tb->pc));
9d27abd9 581#endif
8a40a180
FB
582 /* see if we can patch the calling TB. When the TB
583 spans two pages, we cannot safely do a direct
584 jump. */
c27004ec 585 {
b5fc09ae 586 if (next_tb != 0 &&
4d7a0880 587#ifdef USE_KQEMU
f32fc648
FB
588 (env->kqemu_enabled != 2) &&
589#endif
ec6338ba 590 tb->page_addr[1] == -1) {
b5fc09ae 591 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
3fb2ded1 592 }
c27004ec 593 }
d5975363 594 spin_unlock(&tb_lock);
83479e77 595 env->current_tb = tb;
55e8b85e 596
597 /* cpu_interrupt might be called while translating the
598 TB, but before it is linked into a potentially
599 infinite loop and becomes env->current_tb. Avoid
600 starting execution if there is a pending interrupt. */
be214e6c 601 if (unlikely (env->exit_request))
55e8b85e 602 env->current_tb = NULL;
603
2e70f6ef
PB
604 while (env->current_tb) {
605 tc_ptr = tb->tc_ptr;
3fb2ded1 606 /* execute the generated code */
572a9d4a
BS
607#if defined(__sparc__) && !defined(HOST_SOLARIS)
608#undef env
2e70f6ef 609 env = cpu_single_env;
572a9d4a
BS
610#define env cpu_single_env
611#endif
2e70f6ef
PB
612 next_tb = tcg_qemu_tb_exec(tc_ptr);
613 env->current_tb = NULL;
614 if ((next_tb & 3) == 2) {
bf20dc07 615 /* Instruction counter expired. */
2e70f6ef
PB
616 int insns_left;
617 tb = (TranslationBlock *)(long)(next_tb & ~3);
618 /* Restore PC. */
622ed360 619 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
620 insns_left = env->icount_decr.u32;
621 if (env->icount_extra && insns_left >= 0) {
622 /* Refill decrementer and continue execution. */
623 env->icount_extra += insns_left;
624 if (env->icount_extra > 0xffff) {
625 insns_left = 0xffff;
626 } else {
627 insns_left = env->icount_extra;
628 }
629 env->icount_extra -= insns_left;
630 env->icount_decr.u16.low = insns_left;
631 } else {
632 if (insns_left > 0) {
633 /* Execute remaining instructions. */
634 cpu_exec_nocache(insns_left, tb);
635 }
636 env->exception_index = EXCP_INTERRUPT;
637 next_tb = 0;
638 cpu_loop_exit();
639 }
640 }
641 }
4cbf74b6
FB
642 /* reset soft MMU for next block (it can currently
643 only be set by a memory fault) */
f32fc648
FB
644#if defined(USE_KQEMU)
645#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
646 if (kqemu_is_ok(env) &&
647 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
648 cpu_loop_exit();
649 }
4cbf74b6 650#endif
50a518e3 651 } /* for(;;) */
3fb2ded1 652 } else {
0d1a29f9 653 env_to_regs();
7d13299d 654 }
3fb2ded1
FB
655 } /* for(;;) */
656
7d13299d 657
e4533c7a 658#if defined(TARGET_I386)
9de5e440 659 /* restore flags in standard format */
a7812ae4 660 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
e4533c7a 661#elif defined(TARGET_ARM)
b7bcbe95 662 /* XXX: Save/restore host fpu exception state?. */
93ac68bc 663#elif defined(TARGET_SPARC)
67867308 664#elif defined(TARGET_PPC)
e6e5906b
PB
665#elif defined(TARGET_M68K)
666 cpu_m68k_flush_flags(env, env->cc_op);
667 env->cc_op = CC_OP_FLAGS;
668 env->sr = (env->sr & 0xffe0)
669 | env->cc_dest | (env->cc_x << 4);
6af0bf9c 670#elif defined(TARGET_MIPS)
fdf9b3e8 671#elif defined(TARGET_SH4)
eddf68a6 672#elif defined(TARGET_ALPHA)
f1ccf904 673#elif defined(TARGET_CRIS)
fdf9b3e8 674 /* XXXXX */
e4533c7a
FB
675#else
676#error unsupported target CPU
677#endif
1057eaa7
PB
678
679 /* restore global registers */
1057eaa7
PB
680#include "hostregs_helper.h"
681
6a00d601 682 /* fail safe : never use cpu_single_env outside cpu_exec() */
5fafdf24 683 cpu_single_env = NULL;
7d13299d
FB
684 return ret;
685}
6dbad63e 686
fbf9eeb3
FB
687/* must only be called from the generated code as an exception can be
688 generated */
689void tb_invalidate_page_range(target_ulong start, target_ulong end)
690{
dc5d0b3d
FB
691 /* XXX: cannot enable it yet because it yields to MMU exception
692 where NIP != read address on PowerPC */
693#if 0
fbf9eeb3
FB
694 target_ulong phys_addr;
695 phys_addr = get_phys_addr_code(env, start);
696 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
dc5d0b3d 697#endif
fbf9eeb3
FB
698}
699
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

/* Load a segment register on behalf of user-mode emulation.  In
   real/vm86 mode the cache is filled directly; otherwise the full
   protected-mode segment load helper is used. */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

/* FSAVE to guest memory at 'ptr' (data32 selects 32-bit layout). */
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

/* FRSTOR from guest memory at 'ptr' (data32 selects 32-bit layout). */
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
743
67b915a5
FB
744#if !defined(CONFIG_SOFTMMU)
745
3fb2ded1
FB
746#if defined(TARGET_I386)
747
b56dad1c 748/* 'pc' is the host PC at which the exception was raised. 'address' is
fd6ce8f6
FB
749 the effective address of the memory exception. 'is_write' is 1 if a
750 write caused the exception and otherwise 0'. 'old_set' is the
751 signal set which should be restored */
2b413144 752static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
5fafdf24 753 int is_write, sigset_t *old_set,
bf3e8bf1 754 void *puc)
9de5e440 755{
a513fe19
FB
756 TranslationBlock *tb;
757 int ret;
68a79315 758
83479e77
FB
759 if (cpu_single_env)
760 env = cpu_single_env; /* XXX: find a correct solution for multithread */
fd6ce8f6 761#if defined(DEBUG_SIGNAL)
5fafdf24 762 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
bf3e8bf1 763 pc, address, is_write, *(unsigned long *)old_set);
9de5e440 764#endif
25eb4484 765 /* XXX: locking issue */
53a5960a 766 if (is_write && page_unprotect(h2g(address), pc, puc)) {
fd6ce8f6
FB
767 return 1;
768 }
fbf9eeb3 769
3fb2ded1 770 /* see if it is an MMU fault */
6ebbf390 771 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
3fb2ded1
FB
772 if (ret < 0)
773 return 0; /* not an MMU fault */
774 if (ret == 0)
775 return 1; /* the MMU fault was handled without causing real CPU fault */
776 /* now we have a real cpu fault */
a513fe19
FB
777 tb = tb_find_pc(pc);
778 if (tb) {
9de5e440
FB
779 /* the PC is inside the translated code. It means that we have
780 a virtual CPU fault */
bf3e8bf1 781 cpu_restore_state(tb, env, pc, puc);
3fb2ded1 782 }
4cbf74b6 783 if (ret == 1) {
3fb2ded1 784#if 0
5fafdf24 785 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
4cbf74b6 786 env->eip, env->cr[2], env->error_code);
3fb2ded1 787#endif
4cbf74b6
FB
788 /* we restore the process signal mask as the sigreturn should
789 do it (XXX: use sigsetjmp) */
790 sigprocmask(SIG_SETMASK, old_set, NULL);
54ca9095 791 raise_exception_err(env->exception_index, env->error_code);
4cbf74b6
FB
792 } else {
793 /* activate soft MMU for this block */
3f337316 794 env->hflags |= HF_SOFTMMU_MASK;
fbf9eeb3 795 cpu_resume_from_signal(env, puc);
4cbf74b6 796 }
3fb2ded1
FB
797 /* never comes here */
798 return 1;
799}
800
e4533c7a 801#elif defined(TARGET_ARM)
3fb2ded1 802static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
803 int is_write, sigset_t *old_set,
804 void *puc)
3fb2ded1 805{
68016c62
FB
806 TranslationBlock *tb;
807 int ret;
808
809 if (cpu_single_env)
810 env = cpu_single_env; /* XXX: find a correct solution for multithread */
811#if defined(DEBUG_SIGNAL)
5fafdf24 812 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
813 pc, address, is_write, *(unsigned long *)old_set);
814#endif
9f0777ed 815 /* XXX: locking issue */
53a5960a 816 if (is_write && page_unprotect(h2g(address), pc, puc)) {
9f0777ed
FB
817 return 1;
818 }
68016c62 819 /* see if it is an MMU fault */
6ebbf390 820 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
821 if (ret < 0)
822 return 0; /* not an MMU fault */
823 if (ret == 0)
824 return 1; /* the MMU fault was handled without causing real CPU fault */
825 /* now we have a real cpu fault */
826 tb = tb_find_pc(pc);
827 if (tb) {
828 /* the PC is inside the translated code. It means that we have
829 a virtual CPU fault */
830 cpu_restore_state(tb, env, pc, puc);
831 }
832 /* we restore the process signal mask as the sigreturn should
833 do it (XXX: use sigsetjmp) */
834 sigprocmask(SIG_SETMASK, old_set, NULL);
835 cpu_loop_exit();
968c74da
AJ
836 /* never comes here */
837 return 1;
3fb2ded1 838}
93ac68bc
FB
839#elif defined(TARGET_SPARC)
840static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
841 int is_write, sigset_t *old_set,
842 void *puc)
93ac68bc 843{
68016c62
FB
844 TranslationBlock *tb;
845 int ret;
846
847 if (cpu_single_env)
848 env = cpu_single_env; /* XXX: find a correct solution for multithread */
849#if defined(DEBUG_SIGNAL)
5fafdf24 850 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
851 pc, address, is_write, *(unsigned long *)old_set);
852#endif
b453b70b 853 /* XXX: locking issue */
53a5960a 854 if (is_write && page_unprotect(h2g(address), pc, puc)) {
b453b70b
FB
855 return 1;
856 }
68016c62 857 /* see if it is an MMU fault */
6ebbf390 858 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
859 if (ret < 0)
860 return 0; /* not an MMU fault */
861 if (ret == 0)
862 return 1; /* the MMU fault was handled without causing real CPU fault */
863 /* now we have a real cpu fault */
864 tb = tb_find_pc(pc);
865 if (tb) {
866 /* the PC is inside the translated code. It means that we have
867 a virtual CPU fault */
868 cpu_restore_state(tb, env, pc, puc);
869 }
870 /* we restore the process signal mask as the sigreturn should
871 do it (XXX: use sigsetjmp) */
872 sigprocmask(SIG_SETMASK, old_set, NULL);
873 cpu_loop_exit();
968c74da
AJ
874 /* never comes here */
875 return 1;
93ac68bc 876}
67867308
FB
#elif defined (TARGET_PPC)
/* Resolve a host fault taken while running translated PPC guest code.
   Returns 0 if the fault is not a guest MMU fault (caller falls back to
   the default signal behavior) and 1 if it was handled transparently.
   When a real guest exception must be raised, control leaves via
   cpu_loop_exit()/cpu_resume_from_signal() and this never returns
   (see the "never comes here" marker below). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* A write to a page QEMU write-protected (to detect self-modifying
       code): unprotect it and let the access be retried. */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault: resynchronize guest CPU state from the
           host pc before raising the exception */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
926
927#elif defined(TARGET_M68K)
928static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
929 int is_write, sigset_t *old_set,
930 void *puc)
931{
932 TranslationBlock *tb;
933 int ret;
934
935 if (cpu_single_env)
936 env = cpu_single_env; /* XXX: find a correct solution for multithread */
937#if defined(DEBUG_SIGNAL)
5fafdf24 938 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
e6e5906b
PB
939 pc, address, is_write, *(unsigned long *)old_set);
940#endif
941 /* XXX: locking issue */
942 if (is_write && page_unprotect(address, pc, puc)) {
943 return 1;
944 }
945 /* see if it is an MMU fault */
6ebbf390 946 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
e6e5906b
PB
947 if (ret < 0)
948 return 0; /* not an MMU fault */
949 if (ret == 0)
950 return 1; /* the MMU fault was handled without causing real CPU fault */
951 /* now we have a real cpu fault */
952 tb = tb_find_pc(pc);
953 if (tb) {
954 /* the PC is inside the translated code. It means that we have
955 a virtual CPU fault */
956 cpu_restore_state(tb, env, pc, puc);
957 }
958 /* we restore the process signal mask as the sigreturn should
959 do it (XXX: use sigsetjmp) */
960 sigprocmask(SIG_SETMASK, old_set, NULL);
961 cpu_loop_exit();
962 /* never comes here */
67867308
FB
963 return 1;
964}
6af0bf9c
FB
965
#elif defined (TARGET_MIPS)
/* Resolve a host fault taken while running translated MIPS guest code.
   Returns 0 if the fault is not a guest MMU fault, 1 if it was handled
   transparently; does not return when it escalates the fault via
   cpu_loop_exit() or cpu_resume_from_signal(). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* write fault on a page QEMU write-protected for self-modifying
       code detection: unprotect and retry */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
1015
fdf9b3e8
FB
#elif defined (TARGET_SH4)
/* Resolve a host fault taken while running translated SH4 guest code.
   Returns 0 if the fault is not a guest MMU fault, 1 if it was handled
   transparently; does not return when a guest exception is raised. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* NOTE(review): this dead debug printf references env->nip /
       env->error_code, which appear copied from the PPC handler above;
       verify the field names before ever enabling it. */
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1060
#elif defined (TARGET_ALPHA)
/* Resolve a host fault taken while running translated Alpha guest code.
   Returns 0 if the fault is not a guest MMU fault, 1 if it was handled
   transparently; does not return when a guest exception is raised. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* NOTE(review): dead debug printf references env->nip / env->error_code,
       apparently copied from the PPC handler; verify before enabling. */
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
f1ccf904
TS
1105#elif defined (TARGET_CRIS)
1106static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1107 int is_write, sigset_t *old_set,
1108 void *puc)
1109{
1110 TranslationBlock *tb;
1111 int ret;
1112
1113 if (cpu_single_env)
1114 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1115#if defined(DEBUG_SIGNAL)
1116 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1117 pc, address, is_write, *(unsigned long *)old_set);
1118#endif
1119 /* XXX: locking issue */
1120 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1121 return 1;
1122 }
1123
1124 /* see if it is an MMU fault */
6ebbf390 1125 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
f1ccf904
TS
1126 if (ret < 0)
1127 return 0; /* not an MMU fault */
1128 if (ret == 0)
1129 return 1; /* the MMU fault was handled without causing real CPU fault */
1130
1131 /* now we have a real cpu fault */
1132 tb = tb_find_pc(pc);
1133 if (tb) {
1134 /* the PC is inside the translated code. It means that we have
1135 a virtual CPU fault */
1136 cpu_restore_state(tb, env, pc, puc);
1137 }
f1ccf904
TS
1138 /* we restore the process signal mask as the sigreturn should
1139 do it (XXX: use sigsetjmp) */
1140 sigprocmask(SIG_SETMASK, old_set, NULL);
1141 cpu_loop_exit();
1142 /* never comes here */
1143 return 1;
1144}
1145
e4533c7a
FB
1146#else
1147#error unsupported target CPU
1148#endif
9de5e440 1149
2b413144
FB
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

/* Darwin keeps the machine context behind a pointer and uses different
   structure/field names than Linux. */
# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

/* Host-side SIGSEGV/SIGBUS handler for i386 hosts: extract the faulting
   pc and write flag from the signal context and forward them to the
   target-specific handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    /* trap 0xe is the x86 page fault; bit 1 of the error code is set
       for write accesses (see the Intel SDM page-fault error code) */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1185
bc51c5c9
FB
#elif defined(__x86_64__)

#ifdef __NetBSD__
/* NetBSD prefixes the register indices and stores gregs under a
   different member name. */
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc) _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc) QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

/* Host-side fault handler for x86_64 hosts: extract pc and write flag
   from the ucontext and forward to handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    /* trap 0xe is the x86 page fault; error-code bit 1 = write access */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1216
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(r##reg_num, context)
# define IAR_sig(context)              REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)              REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)
# define XER_sig(context)              REG_sig(xer, context) /* Link register */
# define LR_sig(context)               REG_sig(lr, context)  /* User's integer exception register */
# define CR_sig(context)               REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)            ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)              EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)            EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)             EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

/* Host-side fault handler for PowerPC hosts: extract pc and a
   best-effort write flag from DSISR, then forward to
   handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    /* NOTE(review): DSISR bit 0x02000000 is treated as "store access",
       skipped for trap 0x400 where DSISR has a different meaning —
       confirm against the PowerPC ISA documentation */
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1289
2f87c607
FB
1290#elif defined(__alpha__)
1291
5fafdf24 1292int cpu_signal_handler(int host_signum, void *pinfo,
2f87c607
FB
1293 void *puc)
1294{
5a7b542b 1295 siginfo_t *info = pinfo;
2f87c607
FB
1296 struct ucontext *uc = puc;
1297 uint32_t *pc = uc->uc_mcontext.sc_pc;
1298 uint32_t insn = *pc;
1299 int is_write = 0;
1300
8c6939c0 1301 /* XXX: need kernel patch to get write flag faster */
2f87c607
FB
1302 switch (insn >> 26) {
1303 case 0x0d: // stw
1304 case 0x0e: // stb
1305 case 0x0f: // stq_u
1306 case 0x24: // stf
1307 case 0x25: // stg
1308 case 0x26: // sts
1309 case 0x27: // stt
1310 case 0x2c: // stl
1311 case 0x2d: // stq
1312 case 0x2e: // stl_c
1313 case 0x2f: // stq_c
1314 is_write = 1;
1315 }
1316
5fafdf24 1317 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1318 is_write, &uc->uc_sigmask, puc);
2f87c607 1319}
8c6939c0
FB
#elif defined(__sparc__)

/* Host-side fault handler for SPARC hosts.  The location of pc and the
   signal mask in the signal frame differs between 32-bit/Solaris,
   Linux sparc64 and OpenBSD; the write flag is derived by decoding the
   faulting store instruction. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    /* NOTE(review): assumes the register window/sigmask layout directly
       follows siginfo_t in the signal frame -- ABI-specific, verify */
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    /* format 3 (op = 3) instructions are loads/stores; the op3 field
       selects the store variants below */
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
1364
#elif defined(__arm__)

/* Host-side fault handler for ARM hosts.  The pc field name in the
   mcontext changed between glibc versions. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    /* old glibc exposed the registers as a gregs array (R15 = pc) */
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1386
38e584a0
FB
1387#elif defined(__mc68000)
1388
5fafdf24 1389int cpu_signal_handler(int host_signum, void *pinfo,
38e584a0
FB
1390 void *puc)
1391{
5a7b542b 1392 siginfo_t *info = pinfo;
38e584a0
FB
1393 struct ucontext *uc = puc;
1394 unsigned long pc;
1395 int is_write;
3b46e624 1396
38e584a0
FB
1397 pc = uc->uc_mcontext.gregs[16];
1398 /* XXX: compute is_write */
1399 is_write = 0;
5fafdf24 1400 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
38e584a0 1401 is_write,
bf3e8bf1 1402 &uc->uc_sigmask, puc);
38e584a0
FB
1403}
1404
b8076a74
FB
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

/* Host-side fault handler for IA-64 hosts.  The write flag comes from
   the ISR word in siginfo when the kernel marked it valid. */
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        /* only trust si_isr when si_code is set and the ISR flag says
           the word is valid */
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1438
90cb9493
FB
1439#elif defined(__s390__)
1440
5fafdf24 1441int cpu_signal_handler(int host_signum, void *pinfo,
90cb9493
FB
1442 void *puc)
1443{
5a7b542b 1444 siginfo_t *info = pinfo;
90cb9493
FB
1445 struct ucontext *uc = puc;
1446 unsigned long pc;
1447 int is_write;
3b46e624 1448
90cb9493
FB
1449 pc = uc->uc_mcontext.psw.addr;
1450 /* XXX: compute is_write */
1451 is_write = 0;
5fafdf24 1452 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
c4b89d18
TS
1453 is_write, &uc->uc_sigmask, puc);
1454}
1455
1456#elif defined(__mips__)
1457
5fafdf24 1458int cpu_signal_handler(int host_signum, void *pinfo,
c4b89d18
TS
1459 void *puc)
1460{
9617efe8 1461 siginfo_t *info = pinfo;
c4b89d18
TS
1462 struct ucontext *uc = puc;
1463 greg_t pc = uc->uc_mcontext.pc;
1464 int is_write;
3b46e624 1465
c4b89d18
TS
1466 /* XXX: compute is_write */
1467 is_write = 0;
5fafdf24 1468 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
c4b89d18 1469 is_write, &uc->uc_sigmask, puc);
90cb9493
FB
1470}
1471
f54b3f92
AJ
1472#elif defined(__hppa__)
1473
1474int cpu_signal_handler(int host_signum, void *pinfo,
1475 void *puc)
1476{
1477 struct siginfo *info = pinfo;
1478 struct ucontext *uc = puc;
1479 unsigned long pc;
1480 int is_write;
1481
1482 pc = uc->uc_mcontext.sc_iaoq[0];
1483 /* FIXME: compute is_write */
1484 is_write = 0;
1485 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1486 is_write,
1487 &uc->uc_sigmask, puc);
1488}
1489
9de5e440 1490#else
2b413144 1491
3fb2ded1 1492#error host CPU specific signal handler needed
2b413144 1493
9de5e440 1494#endif
67b915a5
FB
1495
1496#endif /* !defined(CONFIG_SOFTMMU) */