]> git.proxmox.com Git - qemu.git/blame - cpu-exec.c
Sparc32: convert slavio interrupt controller to qdev
[qemu.git] / cpu-exec.c
CommitLineData
7d13299d
FB
1/*
2 * i386 emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0
FB
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
fad6cb1a 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
7d13299d 19 */
e4533c7a 20#include "config.h"
93ac68bc 21#include "exec.h"
956034d7 22#include "disas.h"
7cb69cae 23#include "tcg.h"
7ba1e619 24#include "kvm.h"
7d13299d 25
fbf9eeb3
FB
26#if !defined(CONFIG_SOFTMMU)
27#undef EAX
28#undef ECX
29#undef EDX
30#undef EBX
31#undef ESP
32#undef EBP
33#undef ESI
34#undef EDI
35#undef EIP
36#include <signal.h>
84778508 37#ifdef __linux__
fbf9eeb3
FB
38#include <sys/ucontext.h>
39#endif
84778508 40#endif
fbf9eeb3 41
572a9d4a
BS
42#if defined(__sparc__) && !defined(HOST_SOLARIS)
43// Work around ugly bugs in glibc that mangle global register contents
44#undef env
45#define env cpu_single_env
46#endif
47
36bdbe54
FB
48int tb_invalidated_flag;
49
dc99065b 50//#define DEBUG_EXEC
9de5e440 51//#define DEBUG_SIGNAL
7d13299d 52
6a4955a8
AL
53int qemu_cpu_has_work(CPUState *env)
54{
55 return cpu_has_work(env);
56}
57
e4533c7a
FB
58void cpu_loop_exit(void)
59{
bfed01fc
TS
60 /* NOTE: the register at this point must be saved by hand because
61 longjmp restore them */
62 regs_to_env();
e4533c7a
FB
63 longjmp(env->jmp_env, 1);
64}
bfed01fc 65
fbf9eeb3
FB
66/* exit the current TB from a signal handler. The host registers are
67 restored in a state compatible with the CPU emulator
68 */
5fafdf24 69void cpu_resume_from_signal(CPUState *env1, void *puc)
fbf9eeb3
FB
70{
71#if !defined(CONFIG_SOFTMMU)
84778508 72#ifdef __linux__
fbf9eeb3 73 struct ucontext *uc = puc;
84778508
BS
74#elif defined(__OpenBSD__)
75 struct sigcontext *uc = puc;
76#endif
fbf9eeb3
FB
77#endif
78
79 env = env1;
80
81 /* XXX: restore cpu registers saved in host registers */
82
83#if !defined(CONFIG_SOFTMMU)
84 if (puc) {
85 /* XXX: use siglongjmp ? */
84778508 86#ifdef __linux__
fbf9eeb3 87 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
84778508
BS
88#elif defined(__OpenBSD__)
89 sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
90#endif
fbf9eeb3
FB
91 }
92#endif
9a3ea654 93 env->exception_index = -1;
fbf9eeb3
FB
94 longjmp(env->jmp_env, 1);
95}
96
2e70f6ef
PB
97/* Execute the code without caching the generated code. An interpreter
98 could be used if available. */
99static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
100{
101 unsigned long next_tb;
102 TranslationBlock *tb;
103
104 /* Should never happen.
105 We only end up here when an existing TB is too long. */
106 if (max_cycles > CF_COUNT_MASK)
107 max_cycles = CF_COUNT_MASK;
108
109 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
110 max_cycles);
111 env->current_tb = tb;
112 /* execute the generated code */
113 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
114
115 if ((next_tb & 3) == 2) {
116 /* Restore PC. This may happen if async event occurs before
117 the TB starts executing. */
622ed360 118 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
119 }
120 tb_phys_invalidate(tb, -1);
121 tb_free(tb);
122}
123
8a40a180
FB
124static TranslationBlock *tb_find_slow(target_ulong pc,
125 target_ulong cs_base,
c068688b 126 uint64_t flags)
8a40a180
FB
127{
128 TranslationBlock *tb, **ptb1;
8a40a180
FB
129 unsigned int h;
130 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
3b46e624 131
8a40a180 132 tb_invalidated_flag = 0;
3b46e624 133
8a40a180 134 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
3b46e624 135
8a40a180
FB
136 /* find translated block using physical mappings */
137 phys_pc = get_phys_addr_code(env, pc);
138 phys_page1 = phys_pc & TARGET_PAGE_MASK;
139 phys_page2 = -1;
140 h = tb_phys_hash_func(phys_pc);
141 ptb1 = &tb_phys_hash[h];
142 for(;;) {
143 tb = *ptb1;
144 if (!tb)
145 goto not_found;
5fafdf24 146 if (tb->pc == pc &&
8a40a180 147 tb->page_addr[0] == phys_page1 &&
5fafdf24 148 tb->cs_base == cs_base &&
8a40a180
FB
149 tb->flags == flags) {
150 /* check next page if needed */
151 if (tb->page_addr[1] != -1) {
5fafdf24 152 virt_page2 = (pc & TARGET_PAGE_MASK) +
8a40a180
FB
153 TARGET_PAGE_SIZE;
154 phys_page2 = get_phys_addr_code(env, virt_page2);
155 if (tb->page_addr[1] == phys_page2)
156 goto found;
157 } else {
158 goto found;
159 }
160 }
161 ptb1 = &tb->phys_hash_next;
162 }
163 not_found:
2e70f6ef
PB
164 /* if no translated code available, then translate it now */
165 tb = tb_gen_code(env, pc, cs_base, flags, 0);
3b46e624 166
8a40a180 167 found:
8a40a180
FB
168 /* we add the TB in the virtual pc hash table */
169 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
8a40a180
FB
170 return tb;
171}
172
173static inline TranslationBlock *tb_find_fast(void)
174{
175 TranslationBlock *tb;
176 target_ulong cs_base, pc;
6b917547 177 int flags;
8a40a180
FB
178
179 /* we record a subset of the CPU state. It will
180 always be the same before a given translated block
181 is executed. */
6b917547 182 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
bce61846 183 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
184 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
185 tb->flags != flags)) {
8a40a180
FB
186 tb = tb_find_slow(pc, cs_base, flags);
187 }
188 return tb;
189}
190
dde2367e
AL
191static CPUDebugExcpHandler *debug_excp_handler;
192
193CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
194{
195 CPUDebugExcpHandler *old_handler = debug_excp_handler;
196
197 debug_excp_handler = handler;
198 return old_handler;
199}
200
6e140f28
AL
201static void cpu_handle_debug_exception(CPUState *env)
202{
203 CPUWatchpoint *wp;
204
205 if (!env->watchpoint_hit)
c0ce998e 206 TAILQ_FOREACH(wp, &env->watchpoints, entry)
6e140f28 207 wp->flags &= ~BP_WATCHPOINT_HIT;
dde2367e
AL
208
209 if (debug_excp_handler)
210 debug_excp_handler(env);
6e140f28
AL
211}
212
7d13299d
FB
213/* main execution loop */
214
e4533c7a 215int cpu_exec(CPUState *env1)
7d13299d 216{
1057eaa7
PB
217#define DECLARE_HOST_REGS 1
218#include "hostregs_helper.h"
8a40a180 219 int ret, interrupt_request;
8a40a180 220 TranslationBlock *tb;
c27004ec 221 uint8_t *tc_ptr;
d5975363 222 unsigned long next_tb;
8c6939c0 223
bfed01fc
TS
224 if (cpu_halted(env1) == EXCP_HALTED)
225 return EXCP_HALTED;
5a1e3cfc 226
5fafdf24 227 cpu_single_env = env1;
6a00d601 228
7d13299d 229 /* first we save global registers */
1057eaa7
PB
230#define SAVE_HOST_REGS 1
231#include "hostregs_helper.h"
c27004ec 232 env = env1;
e4533c7a 233
0d1a29f9 234 env_to_regs();
ecb644f4 235#if defined(TARGET_I386)
9de5e440 236 /* put eflags in CPU temporary format */
fc2b4c48
FB
237 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
238 DF = 1 - (2 * ((env->eflags >> 10) & 1));
9de5e440 239 CC_OP = CC_OP_EFLAGS;
fc2b4c48 240 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
93ac68bc 241#elif defined(TARGET_SPARC)
e6e5906b
PB
242#elif defined(TARGET_M68K)
243 env->cc_op = CC_OP_FLAGS;
244 env->cc_dest = env->sr & 0xf;
245 env->cc_x = (env->sr >> 4) & 1;
ecb644f4
TS
246#elif defined(TARGET_ALPHA)
247#elif defined(TARGET_ARM)
248#elif defined(TARGET_PPC)
b779e29e 249#elif defined(TARGET_MICROBLAZE)
6af0bf9c 250#elif defined(TARGET_MIPS)
fdf9b3e8 251#elif defined(TARGET_SH4)
f1ccf904 252#elif defined(TARGET_CRIS)
fdf9b3e8 253 /* XXXXX */
e4533c7a
FB
254#else
255#error unsupported target CPU
256#endif
3fb2ded1 257 env->exception_index = -1;
9d27abd9 258
7d13299d 259 /* prepare setjmp context for exception handling */
3fb2ded1
FB
260 for(;;) {
261 if (setjmp(env->jmp_env) == 0) {
9ddff3d2
BS
262#if defined(__sparc__) && !defined(HOST_SOLARIS)
263#undef env
264 env = cpu_single_env;
265#define env cpu_single_env
266#endif
ee8b7021 267 env->current_tb = NULL;
3fb2ded1
FB
268 /* if an exception is pending, we execute it here */
269 if (env->exception_index >= 0) {
270 if (env->exception_index >= EXCP_INTERRUPT) {
271 /* exit request from the cpu execution loop */
272 ret = env->exception_index;
6e140f28
AL
273 if (ret == EXCP_DEBUG)
274 cpu_handle_debug_exception(env);
3fb2ded1 275 break;
72d239ed
AJ
276 } else {
277#if defined(CONFIG_USER_ONLY)
3fb2ded1 278 /* if user mode only, we simulate a fake exception
9f083493 279 which will be handled outside the cpu execution
3fb2ded1 280 loop */
83479e77 281#if defined(TARGET_I386)
5fafdf24
TS
282 do_interrupt_user(env->exception_index,
283 env->exception_is_int,
284 env->error_code,
3fb2ded1 285 env->exception_next_eip);
eba01623
FB
286 /* successfully delivered */
287 env->old_exception = -1;
83479e77 288#endif
3fb2ded1
FB
289 ret = env->exception_index;
290 break;
72d239ed 291#else
83479e77 292#if defined(TARGET_I386)
3fb2ded1
FB
293 /* simulate a real cpu exception. On i386, it can
294 trigger new exceptions, but we do not handle
295 double or triple faults yet. */
5fafdf24
TS
296 do_interrupt(env->exception_index,
297 env->exception_is_int,
298 env->error_code,
d05e66d2 299 env->exception_next_eip, 0);
678dde13
TS
300 /* successfully delivered */
301 env->old_exception = -1;
ce09776b
FB
302#elif defined(TARGET_PPC)
303 do_interrupt(env);
b779e29e
EI
304#elif defined(TARGET_MICROBLAZE)
305 do_interrupt(env);
6af0bf9c
FB
306#elif defined(TARGET_MIPS)
307 do_interrupt(env);
e95c8d51 308#elif defined(TARGET_SPARC)
f2bc7e7f 309 do_interrupt(env);
b5ff1b31
FB
310#elif defined(TARGET_ARM)
311 do_interrupt(env);
fdf9b3e8
FB
312#elif defined(TARGET_SH4)
313 do_interrupt(env);
eddf68a6
JM
314#elif defined(TARGET_ALPHA)
315 do_interrupt(env);
f1ccf904
TS
316#elif defined(TARGET_CRIS)
317 do_interrupt(env);
0633879f
PB
318#elif defined(TARGET_M68K)
319 do_interrupt(0);
72d239ed 320#endif
83479e77 321#endif
3fb2ded1
FB
322 }
323 env->exception_index = -1;
5fafdf24 324 }
640f42e4 325#ifdef CONFIG_KQEMU
be214e6c 326 if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
9df217a3 327 int ret;
a7812ae4 328 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
9df217a3
FB
329 ret = kqemu_cpu_exec(env);
330 /* put eflags in CPU temporary format */
331 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
332 DF = 1 - (2 * ((env->eflags >> 10) & 1));
333 CC_OP = CC_OP_EFLAGS;
334 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
335 if (ret == 1) {
336 /* exception */
337 longjmp(env->jmp_env, 1);
338 } else if (ret == 2) {
339 /* softmmu execution needed */
340 } else {
be214e6c 341 if (env->interrupt_request != 0 || env->exit_request != 0) {
9df217a3
FB
342 /* hardware interrupt will be executed just after */
343 } else {
344 /* otherwise, we restart */
345 longjmp(env->jmp_env, 1);
346 }
347 }
3fb2ded1 348 }
9df217a3
FB
349#endif
350
7ba1e619 351 if (kvm_enabled()) {
becfc390
AL
352 kvm_cpu_exec(env);
353 longjmp(env->jmp_env, 1);
7ba1e619
AL
354 }
355
b5fc09ae 356 next_tb = 0; /* force lookup of first TB */
3fb2ded1 357 for(;;) {
68a79315 358 interrupt_request = env->interrupt_request;
e1638bd8 359 if (unlikely(interrupt_request)) {
360 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
361 /* Mask out external interrupts for this step. */
362 interrupt_request &= ~(CPU_INTERRUPT_HARD |
363 CPU_INTERRUPT_FIQ |
364 CPU_INTERRUPT_SMI |
365 CPU_INTERRUPT_NMI);
366 }
6658ffb8
PB
367 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
368 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
369 env->exception_index = EXCP_DEBUG;
370 cpu_loop_exit();
371 }
a90b7318 372#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
b779e29e
EI
373 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
374 defined(TARGET_MICROBLAZE)
a90b7318
AZ
375 if (interrupt_request & CPU_INTERRUPT_HALT) {
376 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
377 env->halted = 1;
378 env->exception_index = EXCP_HLT;
379 cpu_loop_exit();
380 }
381#endif
68a79315 382#if defined(TARGET_I386)
b09ea7d5
GN
383 if (interrupt_request & CPU_INTERRUPT_INIT) {
384 svm_check_intercept(SVM_EXIT_INIT);
385 do_cpu_init(env);
386 env->exception_index = EXCP_HALTED;
387 cpu_loop_exit();
388 } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
389 do_cpu_sipi(env);
390 } else if (env->hflags2 & HF2_GIF_MASK) {
db620f46
FB
391 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
392 !(env->hflags & HF_SMM_MASK)) {
393 svm_check_intercept(SVM_EXIT_SMI);
394 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
395 do_smm_enter();
396 next_tb = 0;
397 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
398 !(env->hflags2 & HF2_NMI_MASK)) {
399 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
400 env->hflags2 |= HF2_NMI_MASK;
401 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
402 next_tb = 0;
79c4f6b0
HY
403 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
404 env->interrupt_request &= ~CPU_INTERRUPT_MCE;
405 do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
406 next_tb = 0;
db620f46
FB
407 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
408 (((env->hflags2 & HF2_VINTR_MASK) &&
409 (env->hflags2 & HF2_HIF_MASK)) ||
410 (!(env->hflags2 & HF2_VINTR_MASK) &&
411 (env->eflags & IF_MASK &&
412 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
413 int intno;
414 svm_check_intercept(SVM_EXIT_INTR);
415 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
416 intno = cpu_get_pic_interrupt(env);
93fcfe39 417 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
9ddff3d2
BS
418#if defined(__sparc__) && !defined(HOST_SOLARIS)
419#undef env
420 env = cpu_single_env;
421#define env cpu_single_env
422#endif
db620f46
FB
423 do_interrupt(intno, 0, 0, 0, 1);
424 /* ensure that no TB jump will be modified as
425 the program flow was changed */
426 next_tb = 0;
0573fbfc 427#if !defined(CONFIG_USER_ONLY)
db620f46
FB
428 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
429 (env->eflags & IF_MASK) &&
430 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
431 int intno;
432 /* FIXME: this should respect TPR */
433 svm_check_intercept(SVM_EXIT_VINTR);
db620f46 434 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
93fcfe39 435 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
db620f46 436 do_interrupt(intno, 0, 0, 0, 1);
d40c54d6 437 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
db620f46 438 next_tb = 0;
907a5b26 439#endif
db620f46 440 }
68a79315 441 }
ce09776b 442#elif defined(TARGET_PPC)
9fddaa0c
FB
443#if 0
444 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
445 cpu_ppc_reset(env);
446 }
447#endif
47103572 448 if (interrupt_request & CPU_INTERRUPT_HARD) {
e9df014c
JM
449 ppc_hw_interrupt(env);
450 if (env->pending_interrupts == 0)
451 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
b5fc09ae 452 next_tb = 0;
ce09776b 453 }
b779e29e
EI
454#elif defined(TARGET_MICROBLAZE)
455 if ((interrupt_request & CPU_INTERRUPT_HARD)
456 && (env->sregs[SR_MSR] & MSR_IE)
457 && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
458 && !(env->iflags & (D_FLAG | IMM_FLAG))) {
459 env->exception_index = EXCP_IRQ;
460 do_interrupt(env);
461 next_tb = 0;
462 }
6af0bf9c
FB
463#elif defined(TARGET_MIPS)
464 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
24c7b0e3 465 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
6af0bf9c 466 (env->CP0_Status & (1 << CP0St_IE)) &&
24c7b0e3
TS
467 !(env->CP0_Status & (1 << CP0St_EXL)) &&
468 !(env->CP0_Status & (1 << CP0St_ERL)) &&
6af0bf9c
FB
469 !(env->hflags & MIPS_HFLAG_DM)) {
470 /* Raise it */
471 env->exception_index = EXCP_EXT_INTERRUPT;
472 env->error_code = 0;
473 do_interrupt(env);
b5fc09ae 474 next_tb = 0;
6af0bf9c 475 }
e95c8d51 476#elif defined(TARGET_SPARC)
66321a11 477 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
5210977a 478 cpu_interrupts_enabled(env)) {
66321a11
FB
479 int pil = env->interrupt_index & 15;
480 int type = env->interrupt_index & 0xf0;
481
482 if (((type == TT_EXTINT) &&
483 (pil == 15 || pil > env->psrpil)) ||
484 type != TT_EXTINT) {
485 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
f2bc7e7f
BS
486 env->exception_index = env->interrupt_index;
487 do_interrupt(env);
66321a11 488 env->interrupt_index = 0;
5210977a 489#if !defined(CONFIG_USER_ONLY)
327ac2e7
BS
490 cpu_check_irqs(env);
491#endif
b5fc09ae 492 next_tb = 0;
66321a11 493 }
e95c8d51
FB
494 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
495 //do_interrupt(0, 0, 0, 0, 0);
496 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
a90b7318 497 }
b5ff1b31
FB
498#elif defined(TARGET_ARM)
499 if (interrupt_request & CPU_INTERRUPT_FIQ
500 && !(env->uncached_cpsr & CPSR_F)) {
501 env->exception_index = EXCP_FIQ;
502 do_interrupt(env);
b5fc09ae 503 next_tb = 0;
b5ff1b31 504 }
9ee6e8bb
PB
505 /* ARMv7-M interrupt return works by loading a magic value
506 into the PC. On real hardware the load causes the
507 return to occur. The qemu implementation performs the
508 jump normally, then does the exception return when the
509 CPU tries to execute code at the magic address.
510 This will cause the magic PC value to be pushed to
511 the stack if an interrupt occured at the wrong time.
512 We avoid this by disabling interrupts when
513 pc contains a magic address. */
b5ff1b31 514 if (interrupt_request & CPU_INTERRUPT_HARD
9ee6e8bb
PB
515 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
516 || !(env->uncached_cpsr & CPSR_I))) {
b5ff1b31
FB
517 env->exception_index = EXCP_IRQ;
518 do_interrupt(env);
b5fc09ae 519 next_tb = 0;
b5ff1b31 520 }
fdf9b3e8 521#elif defined(TARGET_SH4)
e96e2044
TS
522 if (interrupt_request & CPU_INTERRUPT_HARD) {
523 do_interrupt(env);
b5fc09ae 524 next_tb = 0;
e96e2044 525 }
eddf68a6
JM
526#elif defined(TARGET_ALPHA)
527 if (interrupt_request & CPU_INTERRUPT_HARD) {
528 do_interrupt(env);
b5fc09ae 529 next_tb = 0;
eddf68a6 530 }
f1ccf904 531#elif defined(TARGET_CRIS)
1b1a38b0
EI
532 if (interrupt_request & CPU_INTERRUPT_HARD
533 && (env->pregs[PR_CCS] & I_FLAG)) {
534 env->exception_index = EXCP_IRQ;
535 do_interrupt(env);
536 next_tb = 0;
537 }
538 if (interrupt_request & CPU_INTERRUPT_NMI
539 && (env->pregs[PR_CCS] & M_FLAG)) {
540 env->exception_index = EXCP_NMI;
f1ccf904 541 do_interrupt(env);
b5fc09ae 542 next_tb = 0;
f1ccf904 543 }
0633879f
PB
544#elif defined(TARGET_M68K)
545 if (interrupt_request & CPU_INTERRUPT_HARD
546 && ((env->sr & SR_I) >> SR_I_SHIFT)
547 < env->pending_level) {
548 /* Real hardware gets the interrupt vector via an
549 IACK cycle at this point. Current emulated
550 hardware doesn't rely on this, so we
551 provide/save the vector when the interrupt is
552 first signalled. */
553 env->exception_index = env->pending_vector;
554 do_interrupt(1);
b5fc09ae 555 next_tb = 0;
0633879f 556 }
68a79315 557#endif
9d05095e
FB
558 /* Don't use the cached interupt_request value,
559 do_interrupt may have updated the EXITTB flag. */
b5ff1b31 560 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
bf3e8bf1
FB
561 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
562 /* ensure that no TB jump will be modified as
563 the program flow was changed */
b5fc09ae 564 next_tb = 0;
bf3e8bf1 565 }
be214e6c
AJ
566 }
567 if (unlikely(env->exit_request)) {
568 env->exit_request = 0;
569 env->exception_index = EXCP_INTERRUPT;
570 cpu_loop_exit();
3fb2ded1 571 }
7d13299d 572#ifdef DEBUG_EXEC
8fec2b8c 573 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
3fb2ded1 574 /* restore flags in standard format */
ecb644f4
TS
575 regs_to_env();
576#if defined(TARGET_I386)
a7812ae4 577 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
93fcfe39 578 log_cpu_state(env, X86_DUMP_CCOP);
3fb2ded1 579 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
e4533c7a 580#elif defined(TARGET_ARM)
93fcfe39 581 log_cpu_state(env, 0);
93ac68bc 582#elif defined(TARGET_SPARC)
93fcfe39 583 log_cpu_state(env, 0);
67867308 584#elif defined(TARGET_PPC)
93fcfe39 585 log_cpu_state(env, 0);
e6e5906b
PB
586#elif defined(TARGET_M68K)
587 cpu_m68k_flush_flags(env, env->cc_op);
588 env->cc_op = CC_OP_FLAGS;
589 env->sr = (env->sr & 0xffe0)
590 | env->cc_dest | (env->cc_x << 4);
93fcfe39 591 log_cpu_state(env, 0);
b779e29e
EI
592#elif defined(TARGET_MICROBLAZE)
593 log_cpu_state(env, 0);
6af0bf9c 594#elif defined(TARGET_MIPS)
93fcfe39 595 log_cpu_state(env, 0);
fdf9b3e8 596#elif defined(TARGET_SH4)
93fcfe39 597 log_cpu_state(env, 0);
eddf68a6 598#elif defined(TARGET_ALPHA)
93fcfe39 599 log_cpu_state(env, 0);
f1ccf904 600#elif defined(TARGET_CRIS)
93fcfe39 601 log_cpu_state(env, 0);
e4533c7a 602#else
5fafdf24 603#error unsupported target CPU
e4533c7a 604#endif
3fb2ded1 605 }
7d13299d 606#endif
d5975363 607 spin_lock(&tb_lock);
8a40a180 608 tb = tb_find_fast();
d5975363
PB
609 /* Note: we do it here to avoid a gcc bug on Mac OS X when
610 doing it in tb_find_slow */
611 if (tb_invalidated_flag) {
612 /* as some TB could have been invalidated because
613 of memory exceptions while generating the code, we
614 must recompute the hash index here */
615 next_tb = 0;
2e70f6ef 616 tb_invalidated_flag = 0;
d5975363 617 }
9d27abd9 618#ifdef DEBUG_EXEC
93fcfe39
AL
619 qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
620 (long)tb->tc_ptr, tb->pc,
621 lookup_symbol(tb->pc));
9d27abd9 622#endif
8a40a180
FB
623 /* see if we can patch the calling TB. When the TB
624 spans two pages, we cannot safely do a direct
625 jump. */
c27004ec 626 {
b5fc09ae 627 if (next_tb != 0 &&
640f42e4 628#ifdef CONFIG_KQEMU
f32fc648
FB
629 (env->kqemu_enabled != 2) &&
630#endif
ec6338ba 631 tb->page_addr[1] == -1) {
b5fc09ae 632 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
3fb2ded1 633 }
c27004ec 634 }
d5975363 635 spin_unlock(&tb_lock);
83479e77 636 env->current_tb = tb;
55e8b85e 637
638 /* cpu_interrupt might be called while translating the
639 TB, but before it is linked into a potentially
640 infinite loop and becomes env->current_tb. Avoid
641 starting execution if there is a pending interrupt. */
be214e6c 642 if (unlikely (env->exit_request))
55e8b85e 643 env->current_tb = NULL;
644
2e70f6ef
PB
645 while (env->current_tb) {
646 tc_ptr = tb->tc_ptr;
3fb2ded1 647 /* execute the generated code */
572a9d4a
BS
648#if defined(__sparc__) && !defined(HOST_SOLARIS)
649#undef env
2e70f6ef 650 env = cpu_single_env;
572a9d4a
BS
651#define env cpu_single_env
652#endif
2e70f6ef
PB
653 next_tb = tcg_qemu_tb_exec(tc_ptr);
654 env->current_tb = NULL;
655 if ((next_tb & 3) == 2) {
bf20dc07 656 /* Instruction counter expired. */
2e70f6ef
PB
657 int insns_left;
658 tb = (TranslationBlock *)(long)(next_tb & ~3);
659 /* Restore PC. */
622ed360 660 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
661 insns_left = env->icount_decr.u32;
662 if (env->icount_extra && insns_left >= 0) {
663 /* Refill decrementer and continue execution. */
664 env->icount_extra += insns_left;
665 if (env->icount_extra > 0xffff) {
666 insns_left = 0xffff;
667 } else {
668 insns_left = env->icount_extra;
669 }
670 env->icount_extra -= insns_left;
671 env->icount_decr.u16.low = insns_left;
672 } else {
673 if (insns_left > 0) {
674 /* Execute remaining instructions. */
675 cpu_exec_nocache(insns_left, tb);
676 }
677 env->exception_index = EXCP_INTERRUPT;
678 next_tb = 0;
679 cpu_loop_exit();
680 }
681 }
682 }
4cbf74b6
FB
683 /* reset soft MMU for next block (it can currently
684 only be set by a memory fault) */
640f42e4 685#if defined(CONFIG_KQEMU)
f32fc648
FB
686#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
687 if (kqemu_is_ok(env) &&
688 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
689 cpu_loop_exit();
690 }
4cbf74b6 691#endif
50a518e3 692 } /* for(;;) */
3fb2ded1 693 } else {
0d1a29f9 694 env_to_regs();
7d13299d 695 }
3fb2ded1
FB
696 } /* for(;;) */
697
7d13299d 698
e4533c7a 699#if defined(TARGET_I386)
9de5e440 700 /* restore flags in standard format */
a7812ae4 701 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
e4533c7a 702#elif defined(TARGET_ARM)
b7bcbe95 703 /* XXX: Save/restore host fpu exception state?. */
93ac68bc 704#elif defined(TARGET_SPARC)
67867308 705#elif defined(TARGET_PPC)
e6e5906b
PB
706#elif defined(TARGET_M68K)
707 cpu_m68k_flush_flags(env, env->cc_op);
708 env->cc_op = CC_OP_FLAGS;
709 env->sr = (env->sr & 0xffe0)
710 | env->cc_dest | (env->cc_x << 4);
b779e29e 711#elif defined(TARGET_MICROBLAZE)
6af0bf9c 712#elif defined(TARGET_MIPS)
fdf9b3e8 713#elif defined(TARGET_SH4)
eddf68a6 714#elif defined(TARGET_ALPHA)
f1ccf904 715#elif defined(TARGET_CRIS)
fdf9b3e8 716 /* XXXXX */
e4533c7a
FB
717#else
718#error unsupported target CPU
719#endif
1057eaa7
PB
720
721 /* restore global registers */
1057eaa7
PB
722#include "hostregs_helper.h"
723
6a00d601 724 /* fail safe : never use cpu_single_env outside cpu_exec() */
5fafdf24 725 cpu_single_env = NULL;
7d13299d
FB
726 return ret;
727}
6dbad63e 728
fbf9eeb3
FB
729/* must only be called from the generated code as an exception can be
730 generated */
731void tb_invalidate_page_range(target_ulong start, target_ulong end)
732{
dc5d0b3d
FB
733 /* XXX: cannot enable it yet because it yields to MMU exception
734 where NIP != read address on PowerPC */
735#if 0
fbf9eeb3
FB
736 target_ulong phys_addr;
737 phys_addr = get_phys_addr_code(env, start);
738 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
dc5d0b3d 739#endif
fbf9eeb3
FB
740}
741
1a18c71b 742#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
e4533c7a 743
6dbad63e
FB
744void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
745{
746 CPUX86State *saved_env;
747
748 saved_env = env;
749 env = s;
a412ac57 750 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
a513fe19 751 selector &= 0xffff;
5fafdf24 752 cpu_x86_load_seg_cache(env, seg_reg, selector,
c27004ec 753 (selector << 4), 0xffff, 0);
a513fe19 754 } else {
5d97559d 755 helper_load_seg(seg_reg, selector);
a513fe19 756 }
6dbad63e
FB
757 env = saved_env;
758}
9de5e440 759
6f12a2a6 760void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
761{
762 CPUX86State *saved_env;
763
764 saved_env = env;
765 env = s;
3b46e624 766
6f12a2a6 767 helper_fsave(ptr, data32);
d0a1ffc9
FB
768
769 env = saved_env;
770}
771
6f12a2a6 772void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
d0a1ffc9
FB
773{
774 CPUX86State *saved_env;
775
776 saved_env = env;
777 env = s;
3b46e624 778
6f12a2a6 779 helper_frstor(ptr, data32);
d0a1ffc9
FB
780
781 env = saved_env;
782}
783
e4533c7a
FB
784#endif /* TARGET_I386 */
785
67b915a5
FB
786#if !defined(CONFIG_SOFTMMU)
787
3fb2ded1
FB
788#if defined(TARGET_I386)
789
b56dad1c 790/* 'pc' is the host PC at which the exception was raised. 'address' is
fd6ce8f6
FB
791 the effective address of the memory exception. 'is_write' is 1 if a
792 write caused the exception and otherwise 0'. 'old_set' is the
793 signal set which should be restored */
2b413144 794static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
5fafdf24 795 int is_write, sigset_t *old_set,
bf3e8bf1 796 void *puc)
9de5e440 797{
a513fe19
FB
798 TranslationBlock *tb;
799 int ret;
68a79315 800
83479e77
FB
801 if (cpu_single_env)
802 env = cpu_single_env; /* XXX: find a correct solution for multithread */
fd6ce8f6 803#if defined(DEBUG_SIGNAL)
5fafdf24 804 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
bf3e8bf1 805 pc, address, is_write, *(unsigned long *)old_set);
9de5e440 806#endif
25eb4484 807 /* XXX: locking issue */
53a5960a 808 if (is_write && page_unprotect(h2g(address), pc, puc)) {
fd6ce8f6
FB
809 return 1;
810 }
fbf9eeb3 811
3fb2ded1 812 /* see if it is an MMU fault */
6ebbf390 813 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
3fb2ded1
FB
814 if (ret < 0)
815 return 0; /* not an MMU fault */
816 if (ret == 0)
817 return 1; /* the MMU fault was handled without causing real CPU fault */
818 /* now we have a real cpu fault */
a513fe19
FB
819 tb = tb_find_pc(pc);
820 if (tb) {
9de5e440
FB
821 /* the PC is inside the translated code. It means that we have
822 a virtual CPU fault */
bf3e8bf1 823 cpu_restore_state(tb, env, pc, puc);
3fb2ded1 824 }
4cbf74b6 825 if (ret == 1) {
3fb2ded1 826#if 0
5fafdf24 827 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
4cbf74b6 828 env->eip, env->cr[2], env->error_code);
3fb2ded1 829#endif
4cbf74b6
FB
830 /* we restore the process signal mask as the sigreturn should
831 do it (XXX: use sigsetjmp) */
832 sigprocmask(SIG_SETMASK, old_set, NULL);
54ca9095 833 raise_exception_err(env->exception_index, env->error_code);
4cbf74b6
FB
834 } else {
835 /* activate soft MMU for this block */
3f337316 836 env->hflags |= HF_SOFTMMU_MASK;
fbf9eeb3 837 cpu_resume_from_signal(env, puc);
4cbf74b6 838 }
3fb2ded1
FB
839 /* never comes here */
840 return 1;
841}
842
e4533c7a 843#elif defined(TARGET_ARM)
3fb2ded1 844static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
bf3e8bf1
FB
845 int is_write, sigset_t *old_set,
846 void *puc)
3fb2ded1 847{
68016c62
FB
848 TranslationBlock *tb;
849 int ret;
850
851 if (cpu_single_env)
852 env = cpu_single_env; /* XXX: find a correct solution for multithread */
853#if defined(DEBUG_SIGNAL)
5fafdf24 854 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
68016c62
FB
855 pc, address, is_write, *(unsigned long *)old_set);
856#endif
9f0777ed 857 /* XXX: locking issue */
53a5960a 858 if (is_write && page_unprotect(h2g(address), pc, puc)) {
9f0777ed
FB
859 return 1;
860 }
68016c62 861 /* see if it is an MMU fault */
6ebbf390 862 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
68016c62
FB
863 if (ret < 0)
864 return 0; /* not an MMU fault */
865 if (ret == 0)
866 return 1; /* the MMU fault was handled without causing real CPU fault */
867 /* now we have a real cpu fault */
868 tb = tb_find_pc(pc);
869 if (tb) {
870 /* the PC is inside the translated code. It means that we have
871 a virtual CPU fault */
872 cpu_restore_state(tb, env, pc, puc);
873 }
874 /* we restore the process signal mask as the sigreturn should
875 do it (XXX: use sigsetjmp) */
876 sigprocmask(SIG_SETMASK, old_set, NULL);
877 cpu_loop_exit();
968c74da
AJ
878 /* never comes here */
879 return 1;
3fb2ded1 880}
93ac68bc
FB
#elif defined(TARGET_SPARC)
/* Handle a host SIGSEGV taken while emulating a SPARC guest in user mode.
   pc/address are host values; old_set is the signal mask to restore.
   Returns 1 if the fault was consumed (page unprotected for self-modifying
   code, or a guest exception was raised), 0 if it is a genuine host fault
   that the caller must report. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    /* h2g() maps the faulting host address back to a guest address so the
       write-protected guest code page can be unprotected. */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
67867308
FB
#elif defined (TARGET_PPC)
/* Handle a host SIGSEGV taken while emulating a PowerPC guest in user
   mode.  Returns 1 if the fault was consumed, 0 if it is a genuine host
   fault.  Unlike most targets, a guest MMU result of 1 raises a guest
   exception while other positive results re-enter via
   cpu_resume_from_signal() to activate the soft MMU for the block. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
968
969#elif defined(TARGET_M68K)
970static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
971 int is_write, sigset_t *old_set,
972 void *puc)
973{
974 TranslationBlock *tb;
975 int ret;
976
977 if (cpu_single_env)
978 env = cpu_single_env; /* XXX: find a correct solution for multithread */
979#if defined(DEBUG_SIGNAL)
5fafdf24 980 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
e6e5906b
PB
981 pc, address, is_write, *(unsigned long *)old_set);
982#endif
983 /* XXX: locking issue */
984 if (is_write && page_unprotect(address, pc, puc)) {
985 return 1;
986 }
987 /* see if it is an MMU fault */
6ebbf390 988 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
e6e5906b
PB
989 if (ret < 0)
990 return 0; /* not an MMU fault */
991 if (ret == 0)
992 return 1; /* the MMU fault was handled without causing real CPU fault */
993 /* now we have a real cpu fault */
994 tb = tb_find_pc(pc);
995 if (tb) {
996 /* the PC is inside the translated code. It means that we have
997 a virtual CPU fault */
998 cpu_restore_state(tb, env, pc, puc);
999 }
1000 /* we restore the process signal mask as the sigreturn should
1001 do it (XXX: use sigsetjmp) */
1002 sigprocmask(SIG_SETMASK, old_set, NULL);
1003 cpu_loop_exit();
1004 /* never comes here */
67867308
FB
1005 return 1;
1006}
6af0bf9c
FB
1007
#elif defined (TARGET_MIPS)
/* Handle a host SIGSEGV taken while emulating a MIPS guest in user mode.
   Returns 1 if the fault was consumed, 0 if it is a genuine host fault.
   A guest MMU result of 1 raises a guest exception; other positive
   results re-enter via cpu_resume_from_signal(). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
1057
#elif defined (TARGET_MICROBLAZE)
/* Handle a host SIGSEGV taken while emulating a MicroBlaze guest in user
   mode.  Returns 1 if the fault was consumed, 0 if it is a genuine host
   fault.  Mirrors the MIPS handler: ret == 1 raises a guest exception,
   other positive results re-enter via cpu_resume_from_signal(). */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
        /* NOTE(review): this dead debug printf was copied from the MIPS
           handler; env->PC may not exist for MicroBlaze — verify before
           ever enabling it. */
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
1107
fdf9b3e8
FB
#elif defined (TARGET_SH4)
/* Handle a host SIGSEGV taken while emulating an SH4 guest in user mode.
   Returns 1 if the fault was consumed (page unprotected or guest
   exception raised), 0 if it is a genuine host fault. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* NOTE(review): this dead debug printf was copied from the PPC
       handler; env->nip does not exist for SH4 — fix before enabling. */
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1152
#elif defined (TARGET_ALPHA)
/* Handle a host SIGSEGV taken while emulating an Alpha guest in user
   mode.  Returns 1 if the fault was consumed (page unprotected or guest
   exception raised), 0 if it is a genuine host fault. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* NOTE(review): this dead debug printf was copied from the PPC
       handler; env->nip does not exist for Alpha — fix before enabling. */
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
f1ccf904
TS
#elif defined (TARGET_CRIS)
/* Handle a host SIGSEGV taken while emulating a CRIS guest in user mode.
   Returns 1 if the fault was consumed (page unprotected or guest
   exception raised), 0 if it is a genuine host fault. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
1237
e4533c7a
FB
1238#else
1239#error unsupported target CPU
1240#endif
9de5e440 1241
2b413144
FB
#if defined(__i386__)

/* Per-OS accessors for the i386 host signal context: faulting EIP, the
   hardware trap number, the page-fault error code, and the saved signal
   mask. */
#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

/* Host SIGSEGV entry point for i386 hosts: extract the faulting PC and
   the write flag (bit 1 of the page-fault error code when the trap is
   0xe) from the signal context and forward to handle_cpu_signal(). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
1288
bc51c5c9
FB
#elif defined(__x86_64__)

/* Per-OS accessors for the x86_64 host signal context: faulting RIP,
   trap number, page-fault error code, and saved signal mask. */
#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

/* Host SIGSEGV entry point for x86_64 hosts: same logic as the i386
   variant — trap 0xe is a page fault, bit 1 of the error code is the
   write flag. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
1327
e58ffeb3 1328#elif defined(_ARCH_PPC)
2b413144 1329
83fb7adf
FB
1330/***********************************************************************
1331 * signal context platform-specific definitions
1332 * From Wine
1333 */
1334#ifdef linux
1335/* All Registers access - only for local access */
1336# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1337/* Gpr Registers access */
1338# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1339# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1340# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1341# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1342# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1343# define LR_sig(context) REG_sig(link, context) /* Link register */
1344# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1345/* Float Registers access */
1346# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1347# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1348/* Exception Registers access */
1349# define DAR_sig(context) REG_sig(dar, context)
1350# define DSISR_sig(context) REG_sig(dsisr, context)
1351# define TRAP_sig(context) REG_sig(trap, context)
1352#endif /* linux */
1353
1354#ifdef __APPLE__
1355# include <sys/ucontext.h>
1356typedef struct ucontext SIGCONTEXT;
1357/* All Registers access - only for local access */
1358# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1359# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1360# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1361# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1362/* Gpr Registers access */
1363# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1364# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1365# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1366# define CTR_sig(context) REG_sig(ctr, context)
1367# define XER_sig(context) REG_sig(xer, context) /* Link register */
1368# define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1369# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1370/* Float Registers access */
1371# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1372# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1373/* Exception Registers access */
1374# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1375# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1376# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1377#endif /* __APPLE__ */
1378
5fafdf24 1379int cpu_signal_handler(int host_signum, void *pinfo,
e4533c7a 1380 void *puc)
2b413144 1381{
5a7b542b 1382 siginfo_t *info = pinfo;
25eb4484 1383 struct ucontext *uc = puc;
25eb4484 1384 unsigned long pc;
25eb4484
FB
1385 int is_write;
1386
83fb7adf 1387 pc = IAR_sig(uc);
25eb4484
FB
1388 is_write = 0;
1389#if 0
1390 /* ppc 4xx case */
83fb7adf 1391 if (DSISR_sig(uc) & 0x00800000)
25eb4484
FB
1392 is_write = 1;
1393#else
83fb7adf 1394 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
25eb4484
FB
1395 is_write = 1;
1396#endif
5fafdf24 1397 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1398 is_write, &uc->uc_sigmask, puc);
2b413144
FB
1399}
1400
2f87c607
FB
1401#elif defined(__alpha__)
1402
5fafdf24 1403int cpu_signal_handler(int host_signum, void *pinfo,
2f87c607
FB
1404 void *puc)
1405{
5a7b542b 1406 siginfo_t *info = pinfo;
2f87c607
FB
1407 struct ucontext *uc = puc;
1408 uint32_t *pc = uc->uc_mcontext.sc_pc;
1409 uint32_t insn = *pc;
1410 int is_write = 0;
1411
8c6939c0 1412 /* XXX: need kernel patch to get write flag faster */
2f87c607
FB
1413 switch (insn >> 26) {
1414 case 0x0d: // stw
1415 case 0x0e: // stb
1416 case 0x0f: // stq_u
1417 case 0x24: // stf
1418 case 0x25: // stg
1419 case 0x26: // sts
1420 case 0x27: // stt
1421 case 0x2c: // stl
1422 case 0x2d: // stq
1423 case 0x2e: // stl_c
1424 case 0x2f: // stq_c
1425 is_write = 1;
1426 }
1427
5fafdf24 1428 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
bf3e8bf1 1429 is_write, &uc->uc_sigmask, puc);
2f87c607 1430}
8c6939c0
FB
#elif defined(__sparc__)

/* Host SIGSEGV entry point for SPARC hosts.  The signal context layout
   differs between 32-bit/Solaris, Linux sparc64, and OpenBSD; each
   branch extracts the faulting PC and the saved signal mask, then the
   store opcodes of the faulting instruction are decoded to derive the
   write flag. */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    /* Format 3 instructions (top two bits == 3) are loads/stores; the
       op3 field (bits 19..24) identifies the store variants below. */
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
1487
#elif defined(__arm__)

/* Host SIGSEGV entry point for ARM hosts: the faulting PC lives in
   gregs[R15] on old glibc (<= 2.3) and in arm_pc on newer glibc.  The
   write flag is not recoverable here (see XXX). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1509
38e584a0
FB
#elif defined(__mc68000)

/* Host SIGSEGV entry point for m68k hosts: gregs[16] holds the faulting
   PC; the write flag is not recoverable here (see XXX). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1527
b8076a74
FB
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

/* Host signal entry point for IA-64 hosts: for fault-class signals the
   write flag is read from ISR bit 33 when the siginfo carries a valid
   ISR word. */
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
1561
90cb9493
FB
#elif defined(__s390__)

/* Host SIGSEGV entry point for s390 hosts: the PSW address is the
   faulting PC; the write flag is not recoverable here (see XXX). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1578
#elif defined(__mips__)

/* Host SIGSEGV entry point for MIPS hosts: uc_mcontext.pc is the
   faulting PC; the write flag is not recoverable here (see XXX). */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
1594
f54b3f92
AJ
1595#elif defined(__hppa__)
1596
1597int cpu_signal_handler(int host_signum, void *pinfo,
1598 void *puc)
1599{
1600 struct siginfo *info = pinfo;
1601 struct ucontext *uc = puc;
1602 unsigned long pc;
1603 int is_write;
1604
1605 pc = uc->uc_mcontext.sc_iaoq[0];
1606 /* FIXME: compute is_write */
1607 is_write = 0;
1608 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1609 is_write,
1610 &uc->uc_sigmask, puc);
1611}
1612
9de5e440 1613#else
2b413144 1614
3fb2ded1 1615#error host CPU specific signal handler needed
2b413144 1616
9de5e440 1617#endif
67b915a5
FB
1618
1619#endif /* !defined(CONFIG_SOFTMMU) */