]> git.proxmox.com Git - qemu.git/blame - cpu-exec.c
User qemu profiling
[qemu.git] / cpu-exec.c
CommitLineData
7d13299d
FB
1/*
2 * i386 emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0
FB
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
7d13299d 19 */
e4533c7a 20#include "config.h"
7cb69cae 21#define CPU_NO_GLOBAL_REGS
93ac68bc 22#include "exec.h"
956034d7 23#include "disas.h"
7cb69cae 24#include "tcg.h"
7ba1e619 25#include "kvm.h"
7d13299d 26
fbf9eeb3
FB
27#if !defined(CONFIG_SOFTMMU)
28#undef EAX
29#undef ECX
30#undef EDX
31#undef EBX
32#undef ESP
33#undef EBP
34#undef ESI
35#undef EDI
36#undef EIP
37#include <signal.h>
84778508 38#ifdef __linux__
fbf9eeb3
FB
39#include <sys/ucontext.h>
40#endif
84778508 41#endif
fbf9eeb3 42
572a9d4a
BS
43#if defined(__sparc__) && !defined(HOST_SOLARIS)
44// Work around ugly bugs in glibc that mangle global register contents
45#undef env
46#define env cpu_single_env
47#endif
48
36bdbe54
FB
49int tb_invalidated_flag;
50
dc99065b 51//#define DEBUG_EXEC
9de5e440 52//#define DEBUG_SIGNAL
7d13299d 53
e4533c7a
FB
/* Abort execution of the current translated block and longjmp back to
   the setjmp point established in cpu_exec().  Never returns. */
void cpu_loop_exit(void)
{
    /* NOTE: the register at this point must be saved by hand because
       longjmp restore them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
bfed01fc 61
fbf9eeb3
FB
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* puc is the signal ucontext; restore the signal mask here since
           we longjmp out of the handler instead of returning through
           sigreturn.  XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    /* jump back to the setjmp point in cpu_exec() */
    longjmp(env->jmp_env, 1);
}
91
2e70f6ef
PB
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
/* Translate at most 'max_cycles' instructions starting from 'orig_tb',
   run the one-shot TB once, then invalidate and free it. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    /* low two bits of next_tb encode the exit reason; 2 means the TB
       was exited before/while executing (see cpu_exec) */
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    /* the TB was generated only for this run: drop it immediately */
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
118
8a40a180
FB
/* Slow-path TB lookup: search the physical-address hash table for a
   translated block matching (pc, cs_base, flags); translate a new one
   if none is found.  The result is cached in env->tb_jmp_cache so that
   tb_find_fast() can hit it next time. */
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    /* walk the collision chain for this hash bucket */
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed: a TB spanning two guest pages
               matches only if the second physical page also agrees */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
167
/* Fast-path TB lookup: compute the (pc, cs_base, flags) key for the
   current target CPU state, probe the virtual-pc jump cache, and fall
   back to tb_find_slow() on a miss. */
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                    | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
            | (env->sr & (SR_MD | SR_RB));                     /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    /* probe the direct-mapped jump cache; verify the full key since
       different (pc, cs_base, flags) tuples can collide on the hash */
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
244
7d13299d
FB
/* main execution loop */

/* cpu_exec: execute guest code for CPU 'env1' until an exit condition.
   Saves the host globals, installs a setjmp target (cpu_loop_exit()
   longjmps back here), then loops: deliver any pending exception,
   service interrupt requests, look up / translate the next TB and run
   it.  Returns the exception index (>= EXCP_INTERRUPT) that ended the
   loop. */
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            /* hand execution to the kqemu kernel accelerator when it can
               run this state; fall back to TCG otherwise */
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                int ret;
                ret = kvm_cpu_exec(env);
                if ((env->interrupt_request & CPU_INTERRUPT_EXIT)) {
                    env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                } else if (env->halted) {
                    cpu_loop_exit();
                } else
                    longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            /* inner loop: service interrupts, then find and run TBs */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    /* interrupts are only taken when GIF is set (SVM) */
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occured at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    /* exit-reason code 2 in the low bits: instruction
                       counter (icount) ran out inside the TB */
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            /* longjmp target: an exception/exit was raised mid-TB;
               resynchronize env from the host register mirrors */
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
6dbad63e 726
fbf9eeb3
FB
/* must only be called from the generated code as an exception can be
   generated */
/* Currently a no-op: the whole body is compiled out (see comment). */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
739
1a18c71b 740#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
e4533c7a 741
6dbad63e
FB
742void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
743{
744 CPUX86State *saved_env;
745
746 saved_env = env;
747 env = s;
a412ac57 748 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
a513fe19 749 selector &= 0xffff;
5fafdf24 750 cpu_x86_load_seg_cache(env, seg_reg, selector,
c27004ec 751 (selector << 4), 0xffff, 0);
a513fe19 752 } else {
5d97559d 753 helper_load_seg(seg_reg, selector);
a513fe19 754 }
6dbad63e
FB
755 env = saved_env;
756}
9de5e440 757
6f12a2a6 758void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
759{
760 CPUX86State *saved_env;
761
762 saved_env = env;
763 env = s;
3b46e624 764
6f12a2a6 765 helper_fsave(ptr, data32);
d0a1ffc9
FB
766
767 env = saved_env;
768}
769
6f12a2a6 770void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
771{
772 CPUX86State *saved_env;
773
774 saved_env = env;
775 env = s;
3b46e624 776
6f12a2a6 777 helper_frstor(ptr, data32);
d0a1ffc9
FB
778
779 env = saved_env;
780}
781
e4533c7a
FB
782#endif /* TARGET_I386 */
783
67b915a5
FB
784#if !defined(CONFIG_SOFTMMU)
785
3fb2ded1
FB
786#if defined(TARGET_I386)
787
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0'. 'old_set' is the
   signal set which should be restored */
/* i386 variant: SIGSEGV handler core for user-mode emulation.
   Returns 1 if the fault was handled, 0 if it was not an MMU fault
   (caller should treat it as a real host crash). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* a write fault on a page we protected for TB invalidation tracking */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        /* raise the guest exception; does not return */
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
840
e4533c7a 841#elif defined(TARGET_ARM)
/* ARM variant of the user-mode SIGSEGV handler core.  Returns 1 if the
   fault was handled, 0 if it was not an MMU fault; on a real guest
   fault it longjmps out via cpu_loop_exit(). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* a write fault on a page we protected for TB invalidation tracking */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
93ac68bc
FB
879#elif defined(TARGET_SPARC)
/* SPARC variant of the user-mode SIGSEGV handler core.  Returns 1 if
   the fault was handled, 0 if it was not an MMU fault; on a real guest
   fault it longjmps out via cpu_loop_exit(). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* a write fault on a page we protected for TB invalidation tracking */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
67867308
FB
917#elif defined (TARGET_PPC)
918static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
919 int is_write, sigset_t *old_set,
920 void *puc)
67867308
FB
921{
922 TranslationBlock *tb;
ce09776b 923 int ret;
3b46e624 924
67867308
FB
925 if (cpu_single_env)
926 env = cpu_single_env; /* XXX: find a correct solution for multithread */
67867308 927#if defined(DEBUG_SIGNAL)
5fafdf24 928 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
67867308
FB
929 pc, address, is_write, *(unsigned long *)old_set);
930#endif
931 /* XXX: locking issue */
53a5960a 932 if (is_write && page_unprotect(h2g(address), pc, puc)) {
67867308
FB
933 return 1;
934 }
935
ce09776b 936 /* see if it is an MMU fault */
6ebbf390 937 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
ce09776b
FB
938 if (ret < 0)
939 return 0; /* not an MMU fault */
940 if (ret == 0)
941 return 1; /* the MMU fault was handled without causing real CPU fault */
942
67867308
FB
943 /* now we have a real cpu fault */
944 tb = tb_find_pc(pc);
945 if (tb) {
946 /* the PC is inside the translated code. It means that we have
947 a virtual CPU fault */
bf3e8bf1 948 cpu_restore_state(tb, env, pc, puc);
67867308 949 }
ce09776b 950 if (ret == 1) {
67867308 951#if 0
5fafdf24 952 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
ce09776b 953 env->nip, env->error_code, tb);
67867308
FB
954#endif
955 /* we restore the process signal mask as the sigreturn should
956 do it (XXX: use sigsetjmp) */
bf3e8bf1 957 sigprocmask(SIG_SETMASK, old_set, NULL);
9fddaa0c 958 do_raise_exception_err(env->exception_index, env->error_code);
ce09776b
FB
959 } else {
960 /* activate soft MMU for this block */
fbf9eeb3 961 cpu_resume_from_signal(env, puc);
ce09776b 962 }
67867308 963 /* never comes here */
e6e5906b
PB
964 return 1;
965}
966
967#elif defined(TARGET_M68K)
968static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
969 int is_write, sigset_t *old_set,
970 void *puc)
971{
972 TranslationBlock *tb;
973 int ret;
974
975 if (cpu_single_env)
976 env = cpu_single_env; /* XXX: find a correct solution for multithread */
977#if defined(DEBUG_SIGNAL)
5fafdf24 978 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
e6e5906b
PB
979 pc, address, is_write, *(unsigned long *)old_set);
980#endif
981 /* XXX: locking issue */
982 if (is_write && page_unprotect(address, pc, puc)) {
983 return 1;
984 }
985 /* see if it is an MMU fault */
6ebbf390 986 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
e6e5906b
PB
987 if (ret < 0)
988 return 0; /* not an MMU fault */
989 if (ret == 0)
990 return 1; /* the MMU fault was handled without causing real CPU fault */
991 /* now we have a real cpu fault */
992 tb = tb_find_pc(pc);
993 if (tb) {
994 /* the PC is inside the translated code. It means that we have
995 a virtual CPU fault */
996 cpu_restore_state(tb, env, pc, puc);
997 }
998 /* we restore the process signal mask as the sigreturn should
999 do it (XXX: use sigsetjmp) */
1000 sigprocmask(SIG_SETMASK, old_set, NULL);
1001 cpu_loop_exit();
1002 /* never comes here */
67867308
FB
1003 return 1;
1004}
6af0bf9c
FB
1005
#elif defined (TARGET_MIPS)
/* Fault handler for the MIPS guest (user-mode emulation).
   Returns 1 if handled, 0 if not an MMU fault.  Like PPC, the MMU
   helper can request either a guest exception (ret == 1) or a retry
   through the soft MMU (other positive values). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    /* re-fetch the per-thread CPU state global */
    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* write to a page holding translated code (self-modifying code) */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        /* longjmps back to the cpu loop with the guest exception set */
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
1055
fdf9b3e8
FB
1056#elif defined (TARGET_SH4)
1057static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1058 int is_write, sigset_t *old_set,
1059 void *puc)
1060{
1061 TranslationBlock *tb;
1062 int ret;
3b46e624 1063
fdf9b3e8
FB
1064 if (cpu_single_env)
1065 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1066#if defined(DEBUG_SIGNAL)
5fafdf24 1067 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
fdf9b3e8
FB
1068 pc, address, is_write, *(unsigned long *)old_set);
1069#endif
1070 /* XXX: locking issue */
1071 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1072 return 1;
1073 }
1074
1075 /* see if it is an MMU fault */
6ebbf390 1076 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
fdf9b3e8
FB
1077 if (ret < 0)
1078 return 0; /* not an MMU fault */
1079 if (ret == 0)
1080 return 1; /* the MMU fault was handled without causing real CPU fault */
1081
1082 /* now we have a real cpu fault */
eddf68a6
JM
1083 tb = tb_find_pc(pc);
1084 if (tb) {
1085 /* the PC is inside the translated code. It means that we have
1086 a virtual CPU fault */
1087 cpu_restore_state(tb, env, pc, puc);
1088 }
1089#if 0
5fafdf24 1090 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
eddf68a6
JM
1091 env->nip, env->error_code, tb);
1092#endif
1093 /* we restore the process signal mask as the sigreturn should
1094 do it (XXX: use sigsetjmp) */
1095 sigprocmask(SIG_SETMASK, old_set, NULL);
1096 cpu_loop_exit();
1097 /* never comes here */
1098 return 1;
1099}
1100
#elif defined (TARGET_ALPHA)
/* Fault handler for the Alpha guest (user-mode emulation).
   Returns 1 if handled, 0 if not an MMU fault; cpu_loop_exit()
   longjmps away to deliver the guest exception. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    /* re-fetch the per-thread CPU state global */
    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* write to a page holding translated code (self-modifying code) */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    /* NOTE(review): env->nip is a PPC field — disabled debug printf is a
       copy/paste leftover; harmless while under #if 0 */
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
f1ccf904
TS
1145#elif defined (TARGET_CRIS)
1146static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1147 int is_write, sigset_t *old_set,
1148 void *puc)
1149{
1150 TranslationBlock *tb;
1151 int ret;
1152
1153 if (cpu_single_env)
1154 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1155#if defined(DEBUG_SIGNAL)
1156 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1157 pc, address, is_write, *(unsigned long *)old_set);
1158#endif
1159 /* XXX: locking issue */
1160 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1161 return 1;
1162 }
1163
1164 /* see if it is an MMU fault */
6ebbf390 1165 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
f1ccf904
TS
1166 if (ret < 0)
1167 return 0; /* not an MMU fault */
1168 if (ret == 0)
1169 return 1; /* the MMU fault was handled without causing real CPU fault */
1170
1171 /* now we have a real cpu fault */
1172 tb = tb_find_pc(pc);
1173 if (tb) {
1174 /* the PC is inside the translated code. It means that we have
1175 a virtual CPU fault */
1176 cpu_restore_state(tb, env, pc, puc);
1177 }
f1ccf904
TS
1178 /* we restore the process signal mask as the sigreturn should
1179 do it (XXX: use sigsetjmp) */
1180 sigprocmask(SIG_SETMASK, old_set, NULL);
1181 cpu_loop_exit();
1182 /* never comes here */
1183 return 1;
1184}
1185
e4533c7a
FB
1186#else
1187#error unsupported target CPU
1188#endif
9de5e440 1189
2b413144
FB
#if defined(__i386__)

/* Accessors for the fault PC, trap number and error code in the host
   signal context; the layout differs between Darwin and Linux/glibc. */
#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

/* Host (i386) signal handler entry: extracts the fault PC and, for page
   faults (trap 0xe), the write bit from the error code, then defers to
   the target-specific handle_cpu_signal().  Returns its result. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    /* trap 0xe is the x86 page fault; bit 1 of the error code is the
       write/read flag */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1225
bc51c5c9
FB
1226#elif defined(__x86_64__)
1227
5a7b542b 1228int cpu_signal_handler(int host_signum, void *pinfo,
bc51c5c9
FB
1229 void *puc)
1230{
5a7b542b 1231 siginfo_t *info = pinfo;
bc51c5c9
FB
1232 struct ucontext *uc = puc;
1233 unsigned long pc;
1234
1235 pc = uc->uc_mcontext.gregs[REG_RIP];
5fafdf24
TS
1236 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1237 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
bc51c5c9
FB
1238 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1239 &uc->uc_sigmask, puc);
1240}
1241
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)		((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)		REG_sig(gpr[reg_num], context)
# define IAR_sig(context)			REG_sig(nip, context)	/* Program counter */
# define MSR_sig(context)			REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)			REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)			REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)			REG_sig(link, context) /* Link register */
# define CR_sig(context)			REG_sig(ccr, context) /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)		(((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)			(*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)			REG_sig(dar, context)
# define DSISR_sig(context)			REG_sig(dsisr, context)
# define TRAP_sig(context)			REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)		((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)	((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)	((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)		((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)		REG_sig(r##reg_num, context)
# define IAR_sig(context)			REG_sig(srr0, context)	/* Program counter */
# define MSR_sig(context)			REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)			REG_sig(ctr, context)
# define XER_sig(context)			REG_sig(xer, context) /* Link register */
# define LR_sig(context)			REG_sig(lr, context)  /* User's integer exception register */
# define CR_sig(context)			REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)		FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)			((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)			EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)			EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)			EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

/* Host (PowerPC) signal handler entry: fault PC is the saved NIP/SRR0;
   the write flag is derived from the DSISR store bit (0x02000000),
   except for trap 0x400 (instruction access).  Defers to the
   target-specific handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1314
2f87c607
FB
1315#elif defined(__alpha__)
1316
5fafdf24 1317int cpu_signal_handler(int host_signum, void *pinfo,
2f87c607
FB
1318 void *puc)
1319{
5a7b542b 1320 siginfo_t *info = pinfo;
2f87c607
FB
1321 struct ucontext *uc = puc;
1322 uint32_t *pc = uc->uc_mcontext.sc_pc;
1323 uint32_t insn = *pc;
1324 int is_write = 0;
1325
8c6939c0 1326 /* XXX: need kernel patch to get write flag faster */
2f87c607
FB
1327 switch (insn >> 26) {
1328 case 0x0d: // stw
1329 case 0x0e: // stb
1330 case 0x0f: // stq_u
1331 case 0x24: // stf
1332 case 0x25: // stg
1333 case 0x26: // sts
1334 case 0x27: // stt
1335 case 0x2c: // stl
1336 case 0x2d: // stq
1337 case 0x2e: // stl_c
1338 case 0x2f: // stq_c
1339 is_write = 1;
1340 }
1341
5fafdf24 1342 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1343 is_write, &uc->uc_sigmask, puc);
2f87c607 1344}
8c6939c0
FB
1345#elif defined(__sparc__)
1346
5fafdf24 1347int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1348 void *puc)
8c6939c0 1349{
5a7b542b 1350 siginfo_t *info = pinfo;
8c6939c0
FB
1351 int is_write;
1352 uint32_t insn;
6b4c11cd 1353#if !defined(__arch64__) || defined(HOST_SOLARIS)
c9e1e2b0
BS
1354 uint32_t *regs = (uint32_t *)(info + 1);
1355 void *sigmask = (regs + 20);
8c6939c0 1356 /* XXX: is there a standard glibc define ? */
c9e1e2b0
BS
1357 unsigned long pc = regs[1];
1358#else
84778508 1359#ifdef __linux__
c9e1e2b0
BS
1360 struct sigcontext *sc = puc;
1361 unsigned long pc = sc->sigc_regs.tpc;
1362 void *sigmask = (void *)sc->sigc_mask;
84778508
BS
1363#elif defined(__OpenBSD__)
1364 struct sigcontext *uc = puc;
1365 unsigned long pc = uc->sc_pc;
1366 void *sigmask = (void *)(long)uc->sc_mask;
1367#endif
c9e1e2b0
BS
1368#endif
1369
8c6939c0
FB
1370 /* XXX: need kernel patch to get write flag faster */
1371 is_write = 0;
1372 insn = *(uint32_t *)pc;
1373 if ((insn >> 30) == 3) {
1374 switch((insn >> 19) & 0x3f) {
1375 case 0x05: // stb
1376 case 0x06: // sth
1377 case 0x04: // st
1378 case 0x07: // std
1379 case 0x24: // stf
1380 case 0x27: // stdf
1381 case 0x25: // stfsr
1382 is_write = 1;
1383 break;
1384 }
1385 }
5fafdf24 1386 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1387 is_write, sigmask, NULL);
8c6939c0
FB
1388}
1389
#elif defined(__arm__)

/* Host (ARM) signal handler entry: the PC field name in ucontext
   changed between glibc versions; the write flag is not recovered
   (always 0), so write faults fall through to the MMU-fault path. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1411
38e584a0
FB
1412#elif defined(__mc68000)
1413
5fafdf24 1414int cpu_signal_handler(int host_signum, void *pinfo,
38e584a0
FB
1415 void *puc)
1416{
5a7b542b 1417 siginfo_t *info = pinfo;
38e584a0
FB
1418 struct ucontext *uc = puc;
1419 unsigned long pc;
1420 int is_write;
3b46e624 1421
38e584a0
FB
1422 pc = uc->uc_mcontext.gregs[16];
1423 /* XXX: compute is_write */
1424 is_write = 0;
5fafdf24 1425 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
38e584a0 1426 is_write,
bf3e8bf1 1427 &uc->uc_sigmask, puc);
38e584a0
FB
1428}
1429
b8076a74
FB
1430#elif defined(__ia64)
1431
1432#ifndef __ISR_VALID
1433 /* This ought to be in <bits/siginfo.h>... */
1434# define __ISR_VALID 1
b8076a74
FB
1435#endif
1436
5a7b542b 1437int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
b8076a74 1438{
5a7b542b 1439 siginfo_t *info = pinfo;
b8076a74
FB
1440 struct ucontext *uc = puc;
1441 unsigned long ip;
1442 int is_write = 0;
1443
1444 ip = uc->uc_mcontext.sc_ip;
1445 switch (host_signum) {
1446 case SIGILL:
1447 case SIGFPE:
1448 case SIGSEGV:
1449 case SIGBUS:
1450 case SIGTRAP:
fd4a43e4 1451 if (info->si_code && (info->si_segvflags & __ISR_VALID))
b8076a74
FB
1452 /* ISR.W (write-access) is bit 33: */
1453 is_write = (info->si_isr >> 33) & 1;
1454 break;
1455
1456 default:
1457 break;
1458 }
1459 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1460 is_write,
1461 &uc->uc_sigmask, puc);
1462}
1463
90cb9493
FB
1464#elif defined(__s390__)
1465
5fafdf24 1466int cpu_signal_handler(int host_signum, void *pinfo,
90cb9493
FB
1467 void *puc)
1468{
5a7b542b 1469 siginfo_t *info = pinfo;
90cb9493
FB
1470 struct ucontext *uc = puc;
1471 unsigned long pc;
1472 int is_write;
3b46e624 1473
90cb9493
FB
1474 pc = uc->uc_mcontext.psw.addr;
1475 /* XXX: compute is_write */
1476 is_write = 0;
5fafdf24 1477 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
c4b89d18
TS
1478 is_write, &uc->uc_sigmask, puc);
1479}
1480
#elif defined(__mips__)

/* Host (MIPS) signal handler entry: the write flag is not recovered
   (always 0). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1496
f54b3f92
AJ
1497#elif defined(__hppa__)
1498
1499int cpu_signal_handler(int host_signum, void *pinfo,
1500 void *puc)
1501{
1502 struct siginfo *info = pinfo;
1503 struct ucontext *uc = puc;
1504 unsigned long pc;
1505 int is_write;
1506
1507 pc = uc->uc_mcontext.sc_iaoq[0];
1508 /* FIXME: compute is_write */
1509 is_write = 0;
1510 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1511 is_write,
1512 &uc->uc_sigmask, puc);
1513}
1514
9de5e440 1515#else
2b413144 1516
3fb2ded1 1517#error host CPU specific signal handler needed
2b413144 1518
9de5e440 1519#endif
67b915a5
FB
1520
1521#endif /* !defined(CONFIG_SOFTMMU) */