]> git.proxmox.com Git - qemu.git/blame - cpu-exec.c
Update from binutils 2.17
[qemu.git] / cpu-exec.c
CommitLineData
7d13299d
FB
1/*
2 * i386 emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0
FB
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
7d13299d 19 */
e4533c7a 20#include "config.h"
7cb69cae 21#define CPU_NO_GLOBAL_REGS
93ac68bc 22#include "exec.h"
956034d7 23#include "disas.h"
7cb69cae 24#include "tcg.h"
7d13299d 25
fbf9eeb3
FB
26#if !defined(CONFIG_SOFTMMU)
27#undef EAX
28#undef ECX
29#undef EDX
30#undef EBX
31#undef ESP
32#undef EBP
33#undef ESI
34#undef EDI
35#undef EIP
36#include <signal.h>
37#include <sys/ucontext.h>
38#endif
39
572a9d4a
BS
40#if defined(__sparc__) && !defined(HOST_SOLARIS)
41// Work around ugly bugs in glibc that mangle global register contents
42#undef env
43#define env cpu_single_env
44#endif
45
36bdbe54
FB
46int tb_invalidated_flag;
47
dc99065b 48//#define DEBUG_EXEC
9de5e440 49//#define DEBUG_SIGNAL
7d13299d 50
e4533c7a
FB
51void cpu_loop_exit(void)
52{
bfed01fc
TS
53 /* NOTE: the register at this point must be saved by hand because
54 longjmp restore them */
55 regs_to_env();
e4533c7a
FB
56 longjmp(env->jmp_env, 1);
57}
bfed01fc 58
e6e5906b 59#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
3475187d
FB
60#define reg_T2
61#endif
e4533c7a 62
fbf9eeb3
FB
63/* exit the current TB from a signal handler. The host registers are
64 restored in a state compatible with the CPU emulator
65 */
5fafdf24 66void cpu_resume_from_signal(CPUState *env1, void *puc)
fbf9eeb3
FB
67{
68#if !defined(CONFIG_SOFTMMU)
69 struct ucontext *uc = puc;
70#endif
71
72 env = env1;
73
74 /* XXX: restore cpu registers saved in host registers */
75
76#if !defined(CONFIG_SOFTMMU)
77 if (puc) {
78 /* XXX: use siglongjmp ? */
79 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
80 }
81#endif
82 longjmp(env->jmp_env, 1);
83}
84
2e70f6ef
PB
85/* Execute the code without caching the generated code. An interpreter
86 could be used if available. */
87static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
88{
89 unsigned long next_tb;
90 TranslationBlock *tb;
91
92 /* Should never happen.
93 We only end up here when an existing TB is too long. */
94 if (max_cycles > CF_COUNT_MASK)
95 max_cycles = CF_COUNT_MASK;
96
97 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
98 max_cycles);
99 env->current_tb = tb;
100 /* execute the generated code */
101 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
102
103 if ((next_tb & 3) == 2) {
104 /* Restore PC. This may happen if async event occurs before
105 the TB starts executing. */
106 CPU_PC_FROM_TB(env, tb);
107 }
108 tb_phys_invalidate(tb, -1);
109 tb_free(tb);
110}
111
8a40a180
FB
112static TranslationBlock *tb_find_slow(target_ulong pc,
113 target_ulong cs_base,
c068688b 114 uint64_t flags)
8a40a180
FB
115{
116 TranslationBlock *tb, **ptb1;
8a40a180
FB
117 unsigned int h;
118 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
3b46e624 119
8a40a180 120 tb_invalidated_flag = 0;
3b46e624 121
8a40a180 122 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
3b46e624 123
8a40a180
FB
124 /* find translated block using physical mappings */
125 phys_pc = get_phys_addr_code(env, pc);
126 phys_page1 = phys_pc & TARGET_PAGE_MASK;
127 phys_page2 = -1;
128 h = tb_phys_hash_func(phys_pc);
129 ptb1 = &tb_phys_hash[h];
130 for(;;) {
131 tb = *ptb1;
132 if (!tb)
133 goto not_found;
5fafdf24 134 if (tb->pc == pc &&
8a40a180 135 tb->page_addr[0] == phys_page1 &&
5fafdf24 136 tb->cs_base == cs_base &&
8a40a180
FB
137 tb->flags == flags) {
138 /* check next page if needed */
139 if (tb->page_addr[1] != -1) {
5fafdf24 140 virt_page2 = (pc & TARGET_PAGE_MASK) +
8a40a180
FB
141 TARGET_PAGE_SIZE;
142 phys_page2 = get_phys_addr_code(env, virt_page2);
143 if (tb->page_addr[1] == phys_page2)
144 goto found;
145 } else {
146 goto found;
147 }
148 }
149 ptb1 = &tb->phys_hash_next;
150 }
151 not_found:
2e70f6ef
PB
152 /* if no translated code available, then translate it now */
153 tb = tb_gen_code(env, pc, cs_base, flags, 0);
3b46e624 154
8a40a180 155 found:
8a40a180
FB
156 /* we add the TB in the virtual pc hash table */
157 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
8a40a180
FB
158 return tb;
159}
160
161static inline TranslationBlock *tb_find_fast(void)
162{
163 TranslationBlock *tb;
164 target_ulong cs_base, pc;
c068688b 165 uint64_t flags;
8a40a180
FB
166
167 /* we record a subset of the CPU state. It will
168 always be the same before a given translated block
169 is executed. */
170#if defined(TARGET_I386)
171 flags = env->hflags;
172 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
173 cs_base = env->segs[R_CS].base;
174 pc = cs_base + env->eip;
175#elif defined(TARGET_ARM)
176 flags = env->thumb | (env->vfp.vec_len << 1)
b5ff1b31
FB
177 | (env->vfp.vec_stride << 4);
178 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
179 flags |= (1 << 6);
40f137e1
PB
180 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
181 flags |= (1 << 7);
9ee6e8bb 182 flags |= (env->condexec_bits << 8);
8a40a180
FB
183 cs_base = 0;
184 pc = env->regs[15];
185#elif defined(TARGET_SPARC)
186#ifdef TARGET_SPARC64
2cade6a3
BS
187 // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
188 flags = ((env->pstate & PS_AM) << 2)
189 | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
a80dde08 190 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
8a40a180 191#else
6d5f237a
BS
192 // FPU enable . Supervisor
193 flags = (env->psref << 4) | env->psrs;
8a40a180
FB
194#endif
195 cs_base = env->npc;
196 pc = env->pc;
197#elif defined(TARGET_PPC)
1527c87e 198 flags = env->hflags;
8a40a180
FB
199 cs_base = 0;
200 pc = env->nip;
201#elif defined(TARGET_MIPS)
56b19403 202 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
cc9442b9 203 cs_base = 0;
b5dc7732 204 pc = env->active_tc.PC;
e6e5906b 205#elif defined(TARGET_M68K)
acf930aa
PB
206 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
207 | (env->sr & SR_S) /* Bit 13 */
208 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
e6e5906b
PB
209 cs_base = 0;
210 pc = env->pc;
fdf9b3e8 211#elif defined(TARGET_SH4)
fe25591e
AJ
212 flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
213 | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */
214 | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
215 | (env->sr & (SR_MD | SR_RB)); /* Bits 29-30 */
823029f9 216 cs_base = 0;
fdf9b3e8 217 pc = env->pc;
eddf68a6
JM
218#elif defined(TARGET_ALPHA)
219 flags = env->ps;
220 cs_base = 0;
221 pc = env->pc;
f1ccf904 222#elif defined(TARGET_CRIS)
a1aebcb8 223 flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
cf1d97f0 224 flags |= env->dslot;
f1ccf904
TS
225 cs_base = 0;
226 pc = env->pc;
8a40a180
FB
227#else
228#error unsupported CPU
229#endif
bce61846 230 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
231 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
232 tb->flags != flags)) {
8a40a180
FB
233 tb = tb_find_slow(pc, cs_base, flags);
234 }
235 return tb;
236}
237
7d13299d
FB
238/* main execution loop */
239
e4533c7a 240int cpu_exec(CPUState *env1)
7d13299d 241{
1057eaa7
PB
242#define DECLARE_HOST_REGS 1
243#include "hostregs_helper.h"
8a40a180 244 int ret, interrupt_request;
8a40a180 245 TranslationBlock *tb;
c27004ec 246 uint8_t *tc_ptr;
d5975363 247 unsigned long next_tb;
8c6939c0 248
bfed01fc
TS
249 if (cpu_halted(env1) == EXCP_HALTED)
250 return EXCP_HALTED;
5a1e3cfc 251
5fafdf24 252 cpu_single_env = env1;
6a00d601 253
7d13299d 254 /* first we save global registers */
1057eaa7
PB
255#define SAVE_HOST_REGS 1
256#include "hostregs_helper.h"
c27004ec 257 env = env1;
e4533c7a 258
0d1a29f9 259 env_to_regs();
ecb644f4 260#if defined(TARGET_I386)
9de5e440 261 /* put eflags in CPU temporary format */
fc2b4c48
FB
262 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
263 DF = 1 - (2 * ((env->eflags >> 10) & 1));
9de5e440 264 CC_OP = CC_OP_EFLAGS;
fc2b4c48 265 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
93ac68bc 266#elif defined(TARGET_SPARC)
e6e5906b
PB
267#elif defined(TARGET_M68K)
268 env->cc_op = CC_OP_FLAGS;
269 env->cc_dest = env->sr & 0xf;
270 env->cc_x = (env->sr >> 4) & 1;
ecb644f4
TS
271#elif defined(TARGET_ALPHA)
272#elif defined(TARGET_ARM)
273#elif defined(TARGET_PPC)
6af0bf9c 274#elif defined(TARGET_MIPS)
fdf9b3e8 275#elif defined(TARGET_SH4)
f1ccf904 276#elif defined(TARGET_CRIS)
fdf9b3e8 277 /* XXXXX */
e4533c7a
FB
278#else
279#error unsupported target CPU
280#endif
3fb2ded1 281 env->exception_index = -1;
9d27abd9 282
7d13299d 283 /* prepare setjmp context for exception handling */
3fb2ded1
FB
284 for(;;) {
285 if (setjmp(env->jmp_env) == 0) {
ee8b7021 286 env->current_tb = NULL;
3fb2ded1
FB
287 /* if an exception is pending, we execute it here */
288 if (env->exception_index >= 0) {
289 if (env->exception_index >= EXCP_INTERRUPT) {
290 /* exit request from the cpu execution loop */
291 ret = env->exception_index;
292 break;
293 } else if (env->user_mode_only) {
294 /* if user mode only, we simulate a fake exception
9f083493 295 which will be handled outside the cpu execution
3fb2ded1 296 loop */
83479e77 297#if defined(TARGET_I386)
5fafdf24
TS
298 do_interrupt_user(env->exception_index,
299 env->exception_is_int,
300 env->error_code,
3fb2ded1 301 env->exception_next_eip);
eba01623
FB
302 /* successfully delivered */
303 env->old_exception = -1;
83479e77 304#endif
3fb2ded1
FB
305 ret = env->exception_index;
306 break;
307 } else {
83479e77 308#if defined(TARGET_I386)
3fb2ded1
FB
309 /* simulate a real cpu exception. On i386, it can
310 trigger new exceptions, but we do not handle
311 double or triple faults yet. */
5fafdf24
TS
312 do_interrupt(env->exception_index,
313 env->exception_is_int,
314 env->error_code,
d05e66d2 315 env->exception_next_eip, 0);
678dde13
TS
316 /* successfully delivered */
317 env->old_exception = -1;
ce09776b
FB
318#elif defined(TARGET_PPC)
319 do_interrupt(env);
6af0bf9c
FB
320#elif defined(TARGET_MIPS)
321 do_interrupt(env);
e95c8d51 322#elif defined(TARGET_SPARC)
f2bc7e7f 323 do_interrupt(env);
b5ff1b31
FB
324#elif defined(TARGET_ARM)
325 do_interrupt(env);
fdf9b3e8
FB
326#elif defined(TARGET_SH4)
327 do_interrupt(env);
eddf68a6
JM
328#elif defined(TARGET_ALPHA)
329 do_interrupt(env);
f1ccf904
TS
330#elif defined(TARGET_CRIS)
331 do_interrupt(env);
0633879f
PB
332#elif defined(TARGET_M68K)
333 do_interrupt(0);
83479e77 334#endif
3fb2ded1
FB
335 }
336 env->exception_index = -1;
5fafdf24 337 }
9df217a3
FB
338#ifdef USE_KQEMU
339 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
340 int ret;
341 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
342 ret = kqemu_cpu_exec(env);
343 /* put eflags in CPU temporary format */
344 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
345 DF = 1 - (2 * ((env->eflags >> 10) & 1));
346 CC_OP = CC_OP_EFLAGS;
347 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
348 if (ret == 1) {
349 /* exception */
350 longjmp(env->jmp_env, 1);
351 } else if (ret == 2) {
352 /* softmmu execution needed */
353 } else {
354 if (env->interrupt_request != 0) {
355 /* hardware interrupt will be executed just after */
356 } else {
357 /* otherwise, we restart */
358 longjmp(env->jmp_env, 1);
359 }
360 }
3fb2ded1 361 }
9df217a3
FB
362#endif
363
b5fc09ae 364 next_tb = 0; /* force lookup of first TB */
3fb2ded1 365 for(;;) {
68a79315 366 interrupt_request = env->interrupt_request;
551bd27f 367 if (unlikely(interrupt_request) &&
db620f46 368 likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
6658ffb8
PB
369 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
370 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
371 env->exception_index = EXCP_DEBUG;
372 cpu_loop_exit();
373 }
a90b7318 374#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
f1ccf904 375 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
a90b7318
AZ
376 if (interrupt_request & CPU_INTERRUPT_HALT) {
377 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
378 env->halted = 1;
379 env->exception_index = EXCP_HLT;
380 cpu_loop_exit();
381 }
382#endif
68a79315 383#if defined(TARGET_I386)
db620f46
FB
384 if (env->hflags2 & HF2_GIF_MASK) {
385 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
386 !(env->hflags & HF_SMM_MASK)) {
387 svm_check_intercept(SVM_EXIT_SMI);
388 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
389 do_smm_enter();
390 next_tb = 0;
391 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
392 !(env->hflags2 & HF2_NMI_MASK)) {
393 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
394 env->hflags2 |= HF2_NMI_MASK;
395 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
396 next_tb = 0;
397 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
398 (((env->hflags2 & HF2_VINTR_MASK) &&
399 (env->hflags2 & HF2_HIF_MASK)) ||
400 (!(env->hflags2 & HF2_VINTR_MASK) &&
401 (env->eflags & IF_MASK &&
402 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
403 int intno;
404 svm_check_intercept(SVM_EXIT_INTR);
405 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
406 intno = cpu_get_pic_interrupt(env);
407 if (loglevel & CPU_LOG_TB_IN_ASM) {
408 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
409 }
410 do_interrupt(intno, 0, 0, 0, 1);
411 /* ensure that no TB jump will be modified as
412 the program flow was changed */
413 next_tb = 0;
0573fbfc 414#if !defined(CONFIG_USER_ONLY)
db620f46
FB
415 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
416 (env->eflags & IF_MASK) &&
417 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
418 int intno;
419 /* FIXME: this should respect TPR */
420 svm_check_intercept(SVM_EXIT_VINTR);
421 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
422 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
423 if (loglevel & CPU_LOG_TB_IN_ASM)
424 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
425 do_interrupt(intno, 0, 0, 0, 1);
426 next_tb = 0;
907a5b26 427#endif
db620f46 428 }
68a79315 429 }
ce09776b 430#elif defined(TARGET_PPC)
9fddaa0c
FB
431#if 0
432 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
433 cpu_ppc_reset(env);
434 }
435#endif
47103572 436 if (interrupt_request & CPU_INTERRUPT_HARD) {
e9df014c
JM
437 ppc_hw_interrupt(env);
438 if (env->pending_interrupts == 0)
439 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
b5fc09ae 440 next_tb = 0;
ce09776b 441 }
6af0bf9c
FB
442#elif defined(TARGET_MIPS)
443 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
24c7b0e3 444 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
6af0bf9c 445 (env->CP0_Status & (1 << CP0St_IE)) &&
24c7b0e3
TS
446 !(env->CP0_Status & (1 << CP0St_EXL)) &&
447 !(env->CP0_Status & (1 << CP0St_ERL)) &&
6af0bf9c
FB
448 !(env->hflags & MIPS_HFLAG_DM)) {
449 /* Raise it */
450 env->exception_index = EXCP_EXT_INTERRUPT;
451 env->error_code = 0;
452 do_interrupt(env);
b5fc09ae 453 next_tb = 0;
6af0bf9c 454 }
e95c8d51 455#elif defined(TARGET_SPARC)
66321a11
FB
456 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
457 (env->psret != 0)) {
458 int pil = env->interrupt_index & 15;
459 int type = env->interrupt_index & 0xf0;
460
461 if (((type == TT_EXTINT) &&
462 (pil == 15 || pil > env->psrpil)) ||
463 type != TT_EXTINT) {
464 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
f2bc7e7f
BS
465 env->exception_index = env->interrupt_index;
466 do_interrupt(env);
66321a11 467 env->interrupt_index = 0;
327ac2e7
BS
468#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
469 cpu_check_irqs(env);
470#endif
b5fc09ae 471 next_tb = 0;
66321a11 472 }
e95c8d51
FB
473 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
474 //do_interrupt(0, 0, 0, 0, 0);
475 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
a90b7318 476 }
b5ff1b31
FB
477#elif defined(TARGET_ARM)
478 if (interrupt_request & CPU_INTERRUPT_FIQ
479 && !(env->uncached_cpsr & CPSR_F)) {
480 env->exception_index = EXCP_FIQ;
481 do_interrupt(env);
b5fc09ae 482 next_tb = 0;
b5ff1b31 483 }
9ee6e8bb
PB
484 /* ARMv7-M interrupt return works by loading a magic value
485 into the PC. On real hardware the load causes the
486 return to occur. The qemu implementation performs the
487 jump normally, then does the exception return when the
488 CPU tries to execute code at the magic address.
489 This will cause the magic PC value to be pushed to
490 the stack if an interrupt occured at the wrong time.
491 We avoid this by disabling interrupts when
492 pc contains a magic address. */
b5ff1b31 493 if (interrupt_request & CPU_INTERRUPT_HARD
9ee6e8bb
PB
494 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
495 || !(env->uncached_cpsr & CPSR_I))) {
b5ff1b31
FB
496 env->exception_index = EXCP_IRQ;
497 do_interrupt(env);
b5fc09ae 498 next_tb = 0;
b5ff1b31 499 }
fdf9b3e8 500#elif defined(TARGET_SH4)
e96e2044
TS
501 if (interrupt_request & CPU_INTERRUPT_HARD) {
502 do_interrupt(env);
b5fc09ae 503 next_tb = 0;
e96e2044 504 }
eddf68a6
JM
505#elif defined(TARGET_ALPHA)
506 if (interrupt_request & CPU_INTERRUPT_HARD) {
507 do_interrupt(env);
b5fc09ae 508 next_tb = 0;
eddf68a6 509 }
f1ccf904 510#elif defined(TARGET_CRIS)
1b1a38b0
EI
511 if (interrupt_request & CPU_INTERRUPT_HARD
512 && (env->pregs[PR_CCS] & I_FLAG)) {
513 env->exception_index = EXCP_IRQ;
514 do_interrupt(env);
515 next_tb = 0;
516 }
517 if (interrupt_request & CPU_INTERRUPT_NMI
518 && (env->pregs[PR_CCS] & M_FLAG)) {
519 env->exception_index = EXCP_NMI;
f1ccf904 520 do_interrupt(env);
b5fc09ae 521 next_tb = 0;
f1ccf904 522 }
0633879f
PB
523#elif defined(TARGET_M68K)
524 if (interrupt_request & CPU_INTERRUPT_HARD
525 && ((env->sr & SR_I) >> SR_I_SHIFT)
526 < env->pending_level) {
527 /* Real hardware gets the interrupt vector via an
528 IACK cycle at this point. Current emulated
529 hardware doesn't rely on this, so we
530 provide/save the vector when the interrupt is
531 first signalled. */
532 env->exception_index = env->pending_vector;
533 do_interrupt(1);
b5fc09ae 534 next_tb = 0;
0633879f 535 }
68a79315 536#endif
9d05095e
FB
537 /* Don't use the cached interupt_request value,
538 do_interrupt may have updated the EXITTB flag. */
b5ff1b31 539 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
bf3e8bf1
FB
540 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
541 /* ensure that no TB jump will be modified as
542 the program flow was changed */
b5fc09ae 543 next_tb = 0;
bf3e8bf1 544 }
68a79315
FB
545 if (interrupt_request & CPU_INTERRUPT_EXIT) {
546 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
547 env->exception_index = EXCP_INTERRUPT;
548 cpu_loop_exit();
549 }
3fb2ded1 550 }
7d13299d 551#ifdef DEBUG_EXEC
b5ff1b31 552 if ((loglevel & CPU_LOG_TB_CPU)) {
3fb2ded1 553 /* restore flags in standard format */
ecb644f4
TS
554 regs_to_env();
555#if defined(TARGET_I386)
3fb2ded1 556 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
7fe48483 557 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3fb2ded1 558 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
e4533c7a 559#elif defined(TARGET_ARM)
7fe48483 560 cpu_dump_state(env, logfile, fprintf, 0);
93ac68bc 561#elif defined(TARGET_SPARC)
3475187d 562 cpu_dump_state(env, logfile, fprintf, 0);
67867308 563#elif defined(TARGET_PPC)
7fe48483 564 cpu_dump_state(env, logfile, fprintf, 0);
e6e5906b
PB
565#elif defined(TARGET_M68K)
566 cpu_m68k_flush_flags(env, env->cc_op);
567 env->cc_op = CC_OP_FLAGS;
568 env->sr = (env->sr & 0xffe0)
569 | env->cc_dest | (env->cc_x << 4);
570 cpu_dump_state(env, logfile, fprintf, 0);
6af0bf9c
FB
571#elif defined(TARGET_MIPS)
572 cpu_dump_state(env, logfile, fprintf, 0);
fdf9b3e8
FB
573#elif defined(TARGET_SH4)
574 cpu_dump_state(env, logfile, fprintf, 0);
eddf68a6
JM
575#elif defined(TARGET_ALPHA)
576 cpu_dump_state(env, logfile, fprintf, 0);
f1ccf904
TS
577#elif defined(TARGET_CRIS)
578 cpu_dump_state(env, logfile, fprintf, 0);
e4533c7a 579#else
5fafdf24 580#error unsupported target CPU
e4533c7a 581#endif
3fb2ded1 582 }
7d13299d 583#endif
d5975363 584 spin_lock(&tb_lock);
8a40a180 585 tb = tb_find_fast();
d5975363
PB
586 /* Note: we do it here to avoid a gcc bug on Mac OS X when
587 doing it in tb_find_slow */
588 if (tb_invalidated_flag) {
589 /* as some TB could have been invalidated because
590 of memory exceptions while generating the code, we
591 must recompute the hash index here */
592 next_tb = 0;
2e70f6ef 593 tb_invalidated_flag = 0;
d5975363 594 }
9d27abd9 595#ifdef DEBUG_EXEC
c1135f61 596 if ((loglevel & CPU_LOG_EXEC)) {
c27004ec
FB
597 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
598 (long)tb->tc_ptr, tb->pc,
599 lookup_symbol(tb->pc));
3fb2ded1 600 }
9d27abd9 601#endif
8a40a180
FB
602 /* see if we can patch the calling TB. When the TB
603 spans two pages, we cannot safely do a direct
604 jump. */
c27004ec 605 {
b5fc09ae 606 if (next_tb != 0 &&
4d7a0880 607#ifdef USE_KQEMU
f32fc648
FB
608 (env->kqemu_enabled != 2) &&
609#endif
ec6338ba 610 tb->page_addr[1] == -1) {
b5fc09ae 611 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
3fb2ded1 612 }
c27004ec 613 }
d5975363 614 spin_unlock(&tb_lock);
83479e77 615 env->current_tb = tb;
2e70f6ef
PB
616 while (env->current_tb) {
617 tc_ptr = tb->tc_ptr;
3fb2ded1 618 /* execute the generated code */
572a9d4a
BS
619#if defined(__sparc__) && !defined(HOST_SOLARIS)
620#undef env
2e70f6ef 621 env = cpu_single_env;
572a9d4a
BS
622#define env cpu_single_env
623#endif
2e70f6ef
PB
624 next_tb = tcg_qemu_tb_exec(tc_ptr);
625 env->current_tb = NULL;
626 if ((next_tb & 3) == 2) {
bf20dc07 627 /* Instruction counter expired. */
2e70f6ef
PB
628 int insns_left;
629 tb = (TranslationBlock *)(long)(next_tb & ~3);
630 /* Restore PC. */
631 CPU_PC_FROM_TB(env, tb);
632 insns_left = env->icount_decr.u32;
633 if (env->icount_extra && insns_left >= 0) {
634 /* Refill decrementer and continue execution. */
635 env->icount_extra += insns_left;
636 if (env->icount_extra > 0xffff) {
637 insns_left = 0xffff;
638 } else {
639 insns_left = env->icount_extra;
640 }
641 env->icount_extra -= insns_left;
642 env->icount_decr.u16.low = insns_left;
643 } else {
644 if (insns_left > 0) {
645 /* Execute remaining instructions. */
646 cpu_exec_nocache(insns_left, tb);
647 }
648 env->exception_index = EXCP_INTERRUPT;
649 next_tb = 0;
650 cpu_loop_exit();
651 }
652 }
653 }
4cbf74b6
FB
654 /* reset soft MMU for next block (it can currently
655 only be set by a memory fault) */
f32fc648
FB
656#if defined(USE_KQEMU)
657#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
658 if (kqemu_is_ok(env) &&
659 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
660 cpu_loop_exit();
661 }
4cbf74b6 662#endif
50a518e3 663 } /* for(;;) */
3fb2ded1 664 } else {
0d1a29f9 665 env_to_regs();
7d13299d 666 }
3fb2ded1
FB
667 } /* for(;;) */
668
7d13299d 669
e4533c7a 670#if defined(TARGET_I386)
9de5e440 671 /* restore flags in standard format */
fc2b4c48 672 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
e4533c7a 673#elif defined(TARGET_ARM)
b7bcbe95 674 /* XXX: Save/restore host fpu exception state?. */
93ac68bc 675#elif defined(TARGET_SPARC)
67867308 676#elif defined(TARGET_PPC)
e6e5906b
PB
677#elif defined(TARGET_M68K)
678 cpu_m68k_flush_flags(env, env->cc_op);
679 env->cc_op = CC_OP_FLAGS;
680 env->sr = (env->sr & 0xffe0)
681 | env->cc_dest | (env->cc_x << 4);
6af0bf9c 682#elif defined(TARGET_MIPS)
fdf9b3e8 683#elif defined(TARGET_SH4)
eddf68a6 684#elif defined(TARGET_ALPHA)
f1ccf904 685#elif defined(TARGET_CRIS)
fdf9b3e8 686 /* XXXXX */
e4533c7a
FB
687#else
688#error unsupported target CPU
689#endif
1057eaa7
PB
690
691 /* restore global registers */
1057eaa7
PB
692#include "hostregs_helper.h"
693
6a00d601 694 /* fail safe : never use cpu_single_env outside cpu_exec() */
5fafdf24 695 cpu_single_env = NULL;
7d13299d
FB
696 return ret;
697}
6dbad63e 698
fbf9eeb3
FB
699/* must only be called from the generated code as an exception can be
700 generated */
701void tb_invalidate_page_range(target_ulong start, target_ulong end)
702{
dc5d0b3d
FB
703 /* XXX: cannot enable it yet because it yields to MMU exception
704 where NIP != read address on PowerPC */
705#if 0
fbf9eeb3
FB
706 target_ulong phys_addr;
707 phys_addr = get_phys_addr_code(env, start);
708 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
dc5d0b3d 709#endif
fbf9eeb3
FB
710}
711
1a18c71b 712#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
e4533c7a 713
6dbad63e
FB
714void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
715{
716 CPUX86State *saved_env;
717
718 saved_env = env;
719 env = s;
a412ac57 720 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
a513fe19 721 selector &= 0xffff;
5fafdf24 722 cpu_x86_load_seg_cache(env, seg_reg, selector,
c27004ec 723 (selector << 4), 0xffff, 0);
a513fe19 724 } else {
5d97559d 725 helper_load_seg(seg_reg, selector);
a513fe19 726 }
6dbad63e
FB
727 env = saved_env;
728}
9de5e440 729
6f12a2a6 730void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
731{
732 CPUX86State *saved_env;
733
734 saved_env = env;
735 env = s;
3b46e624 736
6f12a2a6 737 helper_fsave(ptr, data32);
d0a1ffc9
FB
738
739 env = saved_env;
740}
741
6f12a2a6 742void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
743{
744 CPUX86State *saved_env;
745
746 saved_env = env;
747 env = s;
3b46e624 748
6f12a2a6 749 helper_frstor(ptr, data32);
d0a1ffc9
FB
750
751 env = saved_env;
752}
753
e4533c7a
FB
754#endif /* TARGET_I386 */
755
67b915a5
FB
756#if !defined(CONFIG_SOFTMMU)
757
3fb2ded1
FB
758#if defined(TARGET_I386)
759
b56dad1c 760/* 'pc' is the host PC at which the exception was raised. 'address' is
fd6ce8f6
FB
761 the effective address of the memory exception. 'is_write' is 1 if a
762 write caused the exception and otherwise 0'. 'old_set' is the
763 signal set which should be restored */
2b413144 764static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
5fafdf24 765 int is_write, sigset_t *old_set,
bf3e8bf1 766 void *puc)
9de5e440 767{
a513fe19
FB
768 TranslationBlock *tb;
769 int ret;
68a79315 770
83479e77
FB
771 if (cpu_single_env)
772 env = cpu_single_env; /* XXX: find a correct solution for multithread */
fd6ce8f6 773#if defined(DEBUG_SIGNAL)
5fafdf24 774 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
bf3e8bf1 775 pc, address, is_write, *(unsigned long *)old_set);
9de5e440 776#endif
25eb4484 777 /* XXX: locking issue */
53a5960a 778 if (is_write && page_unprotect(h2g(address), pc, puc)) {
fd6ce8f6
FB
779 return 1;
780 }
fbf9eeb3 781
3fb2ded1 782 /* see if it is an MMU fault */
6ebbf390 783 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
3fb2ded1
FB
784 if (ret < 0)
785 return 0; /* not an MMU fault */
786 if (ret == 0)
787 return 1; /* the MMU fault was handled without causing real CPU fault */
788 /* now we have a real cpu fault */
a513fe19
FB
789 tb = tb_find_pc(pc);
790 if (tb) {
9de5e440
FB
791 /* the PC is inside the translated code. It means that we have
792 a virtual CPU fault */
bf3e8bf1 793 cpu_restore_state(tb, env, pc, puc);
3fb2ded1 794 }
4cbf74b6 795 if (ret == 1) {
3fb2ded1 796#if 0
5fafdf24 797 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
4cbf74b6 798 env->eip, env->cr[2], env->error_code);
3fb2ded1 799#endif
4cbf74b6
FB
800 /* we restore the process signal mask as the sigreturn should
801 do it (XXX: use sigsetjmp) */
802 sigprocmask(SIG_SETMASK, old_set, NULL);
54ca9095 803 raise_exception_err(env->exception_index, env->error_code);
4cbf74b6
FB
804 } else {
805 /* activate soft MMU for this block */
3f337316 806 env->hflags |= HF_SOFTMMU_MASK;
fbf9eeb3 807 cpu_resume_from_signal(env, puc);
4cbf74b6 808 }
3fb2ded1
FB
809 /* never comes here */
810 return 1;
811}
812
e4533c7a 813#elif defined(TARGET_ARM)
3fb2ded1 814static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
815 int is_write, sigset_t *old_set,
816 void *puc)
3fb2ded1 817{
68016c62
FB
818 TranslationBlock *tb;
819 int ret;
820
821 if (cpu_single_env)
822 env = cpu_single_env; /* XXX: find a correct solution for multithread */
823#if defined(DEBUG_SIGNAL)
5fafdf24 824 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
825 pc, address, is_write, *(unsigned long *)old_set);
826#endif
9f0777ed 827 /* XXX: locking issue */
53a5960a 828 if (is_write && page_unprotect(h2g(address), pc, puc)) {
9f0777ed
FB
829 return 1;
830 }
68016c62 831 /* see if it is an MMU fault */
6ebbf390 832 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
833 if (ret < 0)
834 return 0; /* not an MMU fault */
835 if (ret == 0)
836 return 1; /* the MMU fault was handled without causing real CPU fault */
837 /* now we have a real cpu fault */
838 tb = tb_find_pc(pc);
839 if (tb) {
840 /* the PC is inside the translated code. It means that we have
841 a virtual CPU fault */
842 cpu_restore_state(tb, env, pc, puc);
843 }
844 /* we restore the process signal mask as the sigreturn should
845 do it (XXX: use sigsetjmp) */
846 sigprocmask(SIG_SETMASK, old_set, NULL);
847 cpu_loop_exit();
968c74da
AJ
848 /* never comes here */
849 return 1;
3fb2ded1 850}
93ac68bc
FB
851#elif defined(TARGET_SPARC)
852static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
853 int is_write, sigset_t *old_set,
854 void *puc)
93ac68bc 855{
68016c62
FB
856 TranslationBlock *tb;
857 int ret;
858
859 if (cpu_single_env)
860 env = cpu_single_env; /* XXX: find a correct solution for multithread */
861#if defined(DEBUG_SIGNAL)
5fafdf24 862 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
863 pc, address, is_write, *(unsigned long *)old_set);
864#endif
b453b70b 865 /* XXX: locking issue */
53a5960a 866 if (is_write && page_unprotect(h2g(address), pc, puc)) {
b453b70b
FB
867 return 1;
868 }
68016c62 869 /* see if it is an MMU fault */
6ebbf390 870 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
871 if (ret < 0)
872 return 0; /* not an MMU fault */
873 if (ret == 0)
874 return 1; /* the MMU fault was handled without causing real CPU fault */
875 /* now we have a real cpu fault */
876 tb = tb_find_pc(pc);
877 if (tb) {
878 /* the PC is inside the translated code. It means that we have
879 a virtual CPU fault */
880 cpu_restore_state(tb, env, pc, puc);
881 }
882 /* we restore the process signal mask as the sigreturn should
883 do it (XXX: use sigsetjmp) */
884 sigprocmask(SIG_SETMASK, old_set, NULL);
885 cpu_loop_exit();
968c74da
AJ
886 /* never comes here */
887 return 1;
93ac68bc 888}
67867308
FB
#elif defined (TARGET_PPC)
/* Handle a host SIGSEGV raised while executing translated PPC code.
   Returns 1 if the fault was resolved (or control leaves via
   do_raise_exception_err()/cpu_resume_from_signal(), which do not
   return), 0 if it is not an MMU fault and must go to the guest as a
   real SIGSEGV. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* a write fault on a page we protected for TB invalidation tracking */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        /* raise the guest page-fault exception; does not return */
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
938
939#elif defined(TARGET_M68K)
940static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
941 int is_write, sigset_t *old_set,
942 void *puc)
943{
944 TranslationBlock *tb;
945 int ret;
946
947 if (cpu_single_env)
948 env = cpu_single_env; /* XXX: find a correct solution for multithread */
949#if defined(DEBUG_SIGNAL)
5fafdf24 950 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
e6e5906b
PB
951 pc, address, is_write, *(unsigned long *)old_set);
952#endif
953 /* XXX: locking issue */
954 if (is_write && page_unprotect(address, pc, puc)) {
955 return 1;
956 }
957 /* see if it is an MMU fault */
6ebbf390 958 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
e6e5906b
PB
959 if (ret < 0)
960 return 0; /* not an MMU fault */
961 if (ret == 0)
962 return 1; /* the MMU fault was handled without causing real CPU fault */
963 /* now we have a real cpu fault */
964 tb = tb_find_pc(pc);
965 if (tb) {
966 /* the PC is inside the translated code. It means that we have
967 a virtual CPU fault */
968 cpu_restore_state(tb, env, pc, puc);
969 }
970 /* we restore the process signal mask as the sigreturn should
971 do it (XXX: use sigsetjmp) */
972 sigprocmask(SIG_SETMASK, old_set, NULL);
973 cpu_loop_exit();
974 /* never comes here */
67867308
FB
975 return 1;
976}
6af0bf9c
FB
977
#elif defined (TARGET_MIPS)
/* Handle a host SIGSEGV raised while executing translated MIPS code.
   Returns 1 if the fault was resolved (or control leaves via
   do_raise_exception_err()/cpu_resume_from_signal(), which do not
   return), 0 if it is not an MMU fault. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* a write fault on a page we protected for TB invalidation tracking */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        /* raise the guest page-fault exception; does not return */
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
1027
fdf9b3e8
FB
1028#elif defined (TARGET_SH4)
1029static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1030 int is_write, sigset_t *old_set,
1031 void *puc)
1032{
1033 TranslationBlock *tb;
1034 int ret;
3b46e624 1035
fdf9b3e8
FB
1036 if (cpu_single_env)
1037 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1038#if defined(DEBUG_SIGNAL)
5fafdf24 1039 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
fdf9b3e8
FB
1040 pc, address, is_write, *(unsigned long *)old_set);
1041#endif
1042 /* XXX: locking issue */
1043 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1044 return 1;
1045 }
1046
1047 /* see if it is an MMU fault */
6ebbf390 1048 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
fdf9b3e8
FB
1049 if (ret < 0)
1050 return 0; /* not an MMU fault */
1051 if (ret == 0)
1052 return 1; /* the MMU fault was handled without causing real CPU fault */
1053
1054 /* now we have a real cpu fault */
eddf68a6
JM
1055 tb = tb_find_pc(pc);
1056 if (tb) {
1057 /* the PC is inside the translated code. It means that we have
1058 a virtual CPU fault */
1059 cpu_restore_state(tb, env, pc, puc);
1060 }
1061#if 0
5fafdf24 1062 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
eddf68a6
JM
1063 env->nip, env->error_code, tb);
1064#endif
1065 /* we restore the process signal mask as the sigreturn should
1066 do it (XXX: use sigsetjmp) */
1067 sigprocmask(SIG_SETMASK, old_set, NULL);
1068 cpu_loop_exit();
1069 /* never comes here */
1070 return 1;
1071}
1072
1073#elif defined (TARGET_ALPHA)
1074static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1075 int is_write, sigset_t *old_set,
1076 void *puc)
1077{
1078 TranslationBlock *tb;
1079 int ret;
3b46e624 1080
eddf68a6
JM
1081 if (cpu_single_env)
1082 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1083#if defined(DEBUG_SIGNAL)
5fafdf24 1084 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
eddf68a6
JM
1085 pc, address, is_write, *(unsigned long *)old_set);
1086#endif
1087 /* XXX: locking issue */
1088 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1089 return 1;
1090 }
1091
1092 /* see if it is an MMU fault */
6ebbf390 1093 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
eddf68a6
JM
1094 if (ret < 0)
1095 return 0; /* not an MMU fault */
1096 if (ret == 0)
1097 return 1; /* the MMU fault was handled without causing real CPU fault */
1098
1099 /* now we have a real cpu fault */
fdf9b3e8
FB
1100 tb = tb_find_pc(pc);
1101 if (tb) {
1102 /* the PC is inside the translated code. It means that we have
1103 a virtual CPU fault */
1104 cpu_restore_state(tb, env, pc, puc);
1105 }
fdf9b3e8 1106#if 0
5fafdf24 1107 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
fdf9b3e8
FB
1108 env->nip, env->error_code, tb);
1109#endif
1110 /* we restore the process signal mask as the sigreturn should
1111 do it (XXX: use sigsetjmp) */
355fb23d
PB
1112 sigprocmask(SIG_SETMASK, old_set, NULL);
1113 cpu_loop_exit();
fdf9b3e8
FB
1114 /* never comes here */
1115 return 1;
1116}
f1ccf904
TS
1117#elif defined (TARGET_CRIS)
1118static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1119 int is_write, sigset_t *old_set,
1120 void *puc)
1121{
1122 TranslationBlock *tb;
1123 int ret;
1124
1125 if (cpu_single_env)
1126 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1127#if defined(DEBUG_SIGNAL)
1128 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1129 pc, address, is_write, *(unsigned long *)old_set);
1130#endif
1131 /* XXX: locking issue */
1132 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1133 return 1;
1134 }
1135
1136 /* see if it is an MMU fault */
6ebbf390 1137 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
f1ccf904
TS
1138 if (ret < 0)
1139 return 0; /* not an MMU fault */
1140 if (ret == 0)
1141 return 1; /* the MMU fault was handled without causing real CPU fault */
1142
1143 /* now we have a real cpu fault */
1144 tb = tb_find_pc(pc);
1145 if (tb) {
1146 /* the PC is inside the translated code. It means that we have
1147 a virtual CPU fault */
1148 cpu_restore_state(tb, env, pc, puc);
1149 }
f1ccf904
TS
1150 /* we restore the process signal mask as the sigreturn should
1151 do it (XXX: use sigsetjmp) */
1152 sigprocmask(SIG_SETMASK, old_set, NULL);
1153 cpu_loop_exit();
1154 /* never comes here */
1155 return 1;
1156}
1157
e4533c7a
FB
1158#else
1159#error unsupported target CPU
1160#endif
9de5e440 1161
2b413144
FB
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

/* Darwin keeps the machine state behind a pointer in the ucontext. */
# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)    ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#endif

/* Host SIGSEGV handler for i386 hosts: pull the fault PC, trap number
   and write flag out of the signal context and forward everything to
   handle_cpu_signal(). Returns its result (1 = handled). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    /* trap 0xe is the page fault; bit 1 of the error code is set for
       write accesses */
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
1197
bc51c5c9
FB
1198#elif defined(__x86_64__)
1199
5a7b542b 1200int cpu_signal_handler(int host_signum, void *pinfo,
bc51c5c9
FB
1201 void *puc)
1202{
5a7b542b 1203 siginfo_t *info = pinfo;
bc51c5c9
FB
1204 struct ucontext *uc = puc;
1205 unsigned long pc;
1206
1207 pc = uc->uc_mcontext.gregs[REG_RIP];
5fafdf24
TS
1208 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1209 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
bc51c5c9
FB
1210 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1211 &uc->uc_sigmask, puc);
1212}
1213
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)		((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)		REG_sig(gpr[reg_num], context)
# define IAR_sig(context)			REG_sig(nip, context)	/* Program counter */
# define MSR_sig(context)			REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)			REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)			REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)			REG_sig(link, context) /* Link register */
# define CR_sig(context)			REG_sig(ccr, context) /* Condition register */
/* Float Registers access  */
/* NOTE(review): the float/FPSCR accessors reach past the GPR block by a
   hand-computed byte offset (48 regs * 4 bytes) -- layout-dependent. */
# define FLOAT_sig(reg_num, context)		(((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)			(*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)			REG_sig(dar, context)
# define DSISR_sig(context)			REG_sig(dsisr, context)
# define TRAP_sig(context)			REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)		((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)	((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)	((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)		((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)		REG_sig(r##reg_num, context)
# define IAR_sig(context)			REG_sig(srr0, context)	/* Program counter */
# define MSR_sig(context)			REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)			REG_sig(ctr, context)
# define XER_sig(context)			REG_sig(xer, context) /* Link register */
# define LR_sig(context)			REG_sig(lr, context)  /* User's integer exception register */
# define CR_sig(context)			REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)		FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)			((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)			EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)			EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)			EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

/* Host SIGSEGV handler for PowerPC hosts: recover the fault PC and the
   write flag (from DSISR) and forward to handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    /* DSISR bit 0x02000000 flags a store; trap 0x400 (ISI) is an
       instruction fetch fault, never a write */
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1286
2f87c607
FB
1287#elif defined(__alpha__)
1288
5fafdf24 1289int cpu_signal_handler(int host_signum, void *pinfo,
2f87c607
FB
1290 void *puc)
1291{
5a7b542b 1292 siginfo_t *info = pinfo;
2f87c607
FB
1293 struct ucontext *uc = puc;
1294 uint32_t *pc = uc->uc_mcontext.sc_pc;
1295 uint32_t insn = *pc;
1296 int is_write = 0;
1297
8c6939c0 1298 /* XXX: need kernel patch to get write flag faster */
2f87c607
FB
1299 switch (insn >> 26) {
1300 case 0x0d: // stw
1301 case 0x0e: // stb
1302 case 0x0f: // stq_u
1303 case 0x24: // stf
1304 case 0x25: // stg
1305 case 0x26: // sts
1306 case 0x27: // stt
1307 case 0x2c: // stl
1308 case 0x2d: // stq
1309 case 0x2e: // stl_c
1310 case 0x2f: // stq_c
1311 is_write = 1;
1312 }
1313
5fafdf24 1314 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1315 is_write, &uc->uc_sigmask, puc);
2f87c607 1316}
8c6939c0
FB
#elif defined(__sparc__)

/* Host SIGSEGV handler for SPARC hosts: decode the faulting instruction
   to recover the write flag, then forward to handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    /* NOTE(review): the register save area is assumed to sit right
       after siginfo_t, with the signal mask 20 words further on --
       a kernel-ABI layout hack, not a documented interface. */
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    /* format 3 (op = 3) covers the load/store instructions; op3 in
       bits 24..19 selects the store variants below */
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
1355
1356#elif defined(__arm__)
1357
5fafdf24 1358int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1359 void *puc)
8c6939c0 1360{
5a7b542b 1361 siginfo_t *info = pinfo;
8c6939c0
FB
1362 struct ucontext *uc = puc;
1363 unsigned long pc;
1364 int is_write;
3b46e624 1365
48bbf11b 1366#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
5c49b363
AZ
1367 pc = uc->uc_mcontext.gregs[R15];
1368#else
4eee57f5 1369 pc = uc->uc_mcontext.arm_pc;
5c49b363 1370#endif
8c6939c0
FB
1371 /* XXX: compute is_write */
1372 is_write = 0;
5fafdf24 1373 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
8c6939c0 1374 is_write,
f3a9676a 1375 &uc->uc_sigmask, puc);
8c6939c0
FB
1376}
1377
38e584a0
FB
1378#elif defined(__mc68000)
1379
5fafdf24 1380int cpu_signal_handler(int host_signum, void *pinfo,
38e584a0
FB
1381 void *puc)
1382{
5a7b542b 1383 siginfo_t *info = pinfo;
38e584a0
FB
1384 struct ucontext *uc = puc;
1385 unsigned long pc;
1386 int is_write;
3b46e624 1387
38e584a0
FB
1388 pc = uc->uc_mcontext.gregs[16];
1389 /* XXX: compute is_write */
1390 is_write = 0;
5fafdf24 1391 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
38e584a0 1392 is_write,
bf3e8bf1 1393 &uc->uc_sigmask, puc);
38e584a0
FB
1394}
1395
b8076a74
FB
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID	1
#endif

/* Host fault handler for IA-64 hosts: the write flag is bit 33 (ISR.W)
   of the interruption status register delivered in siginfo, when the
   kernel marks it valid. */
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          /* si_isr is only meaningful when __ISR_VALID is flagged */
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1429
90cb9493
FB
1430#elif defined(__s390__)
1431
5fafdf24 1432int cpu_signal_handler(int host_signum, void *pinfo,
90cb9493
FB
1433 void *puc)
1434{
5a7b542b 1435 siginfo_t *info = pinfo;
90cb9493
FB
1436 struct ucontext *uc = puc;
1437 unsigned long pc;
1438 int is_write;
3b46e624 1439
90cb9493
FB
1440 pc = uc->uc_mcontext.psw.addr;
1441 /* XXX: compute is_write */
1442 is_write = 0;
5fafdf24 1443 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
c4b89d18
TS
1444 is_write, &uc->uc_sigmask, puc);
1445}
1446
1447#elif defined(__mips__)
1448
5fafdf24 1449int cpu_signal_handler(int host_signum, void *pinfo,
c4b89d18
TS
1450 void *puc)
1451{
9617efe8 1452 siginfo_t *info = pinfo;
c4b89d18
TS
1453 struct ucontext *uc = puc;
1454 greg_t pc = uc->uc_mcontext.pc;
1455 int is_write;
3b46e624 1456
c4b89d18
TS
1457 /* XXX: compute is_write */
1458 is_write = 0;
5fafdf24 1459 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
c4b89d18 1460 is_write, &uc->uc_sigmask, puc);
90cb9493
FB
1461}
1462
f54b3f92
AJ
1463#elif defined(__hppa__)
1464
1465int cpu_signal_handler(int host_signum, void *pinfo,
1466 void *puc)
1467{
1468 struct siginfo *info = pinfo;
1469 struct ucontext *uc = puc;
1470 unsigned long pc;
1471 int is_write;
1472
1473 pc = uc->uc_mcontext.sc_iaoq[0];
1474 /* FIXME: compute is_write */
1475 is_write = 0;
1476 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1477 is_write,
1478 &uc->uc_sigmask, puc);
1479}
1480
9de5e440 1481#else
2b413144 1482
3fb2ded1 1483#error host CPU specific signal handler needed
2b413144 1484
9de5e440 1485#endif
67b915a5
FB
1486
1487#endif /* !defined(CONFIG_SOFTMMU) */