/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;
static unsigned long next_tb;
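/* tb_invalidated_flag is set when previously generated code may have been
   thrown away (for example after the TB cache flush in tb_find_slow());
   the main loop reacts by clearing next_tb so that it never chains from a
   translation block that might no longer exist. */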

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

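/* Translation block (TB) lookup is split in two stages: tb_find_fast()
   first probes the virtual-PC indexed cache env->tb_jmp_cache and only
   falls back to tb_find_slow() on a miss, which searches the physical-PC
   hash table and, when no block exists yet, generates one with
   cpu_gen_code(). */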
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}
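
/* In the main loop below, next_tb carries the address of the previously
   executed TB with the index of the exit jump that was taken encoded in
   its two low bits (see the tb_add_jump() call); a value of zero means
   "do not chain" and simply forces a fresh lookup. */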

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

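            /* Inner execution loop: handle pending interrupts, look up
               (and possibly chain) one translated block and run it; any
               exception or exit request longjmp()s back to the setjmp()
               above. */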
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    && likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
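                /* Once tb_add_jump() has patched the exit jump of the
                   previous block, execution flows directly from one
                   translated block to the next without coming back here,
                   until the chain is broken (next_tb reset to 0 above) or
                   an exception is raised. */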
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
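
/* In real mode and in VM86 mode the segment base is simply selector << 4
   (e.g. selector 0x1234 gives base 0x12340) with a 64 KB limit, which is
   what the cpu_x86_load_seg_cache() call above installs; in protected
   mode the descriptor must be loaded instead, so helper_load_seg() is
   used. */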

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
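/* Return convention shared by all handle_cpu_signal() variants below:
   0 means the fault was not a guest MMU fault and is left to the caller;
   1 means it was handled here, either because the page could be
   unprotected or because a guest exception was raised (in which case the
   function does not actually return but longjmp()s back into cpu_exec). */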
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

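/* The target-specific variants below all follow the same pattern as the
   i386 one above: try page_unprotect() on write faults, ask the target
   MMU fault handler, and either report the fault as unhandled (return 0)
   or deliver the corresponding guest exception. */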
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

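/* The cpu_signal_handler() implementations below are host-specific: each
   one extracts the faulting program counter (and, where the host signal
   context provides it, a write/read indication) from the signal context
   and passes them on to handle_cpu_signal() above. */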
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)              REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context)  /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(r##reg_num, context)
# define IAR_sig(context)              REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)
# define XER_sig(context)              REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)               REG_sig(lr, context)    /* Link register */
# define CR_sig(context)               REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)            ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)              EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)            EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)             EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */