/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
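
/* Note (from reading the call sites in this file, not a documented
   contract): cpu_loop_exit() unwinds straight to the setjmp() in
   cpu_exec() below, so callers that want an exception dispatched on
   re-entry are expected to set env->exception_index first. */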

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC. This may happen if an async event occurs before
           the TB starts executing. */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
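
/* A note on next_tb, based on how it is used in this file: the value
   returned by tcg_qemu_tb_exec() carries the address of the TB that
   was last executed, with status flags packed into its two low bits.
   A value of 2 in those bits is treated as "execution was interrupted
   before the TB actually ran" (e.g. the instruction counter expired),
   in which case the PC must be restored from the TB descriptor, as
   done above. */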

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
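
/* Taken together, the two functions above implement a two-level TB
   lookup: tb_find_fast() probes a small direct-mapped cache indexed
   by a hash of the virtual PC, and on a miss tb_find_slow() walks the
   chained hash table keyed by the *physical* PC (so that aliased
   mappings can share translations), generating new code only when
   nothing matches.  Roughly speaking, the fast path costs one load
   and a handful of compares per block transition. */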

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it. */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;
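
    /* Note on the barrier: on hosts where 'env' lives in a reserved
       global register (AREG0), barrier() is a compiler-only barrier.
       It keeps the save above, and the matching restore at the end of
       this function, from being reordered past the code that clobbers
       that register; it emits no hardware memory fence. */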

    if (exit_request) {
        env->exit_request = 1;
        exit_request = 0;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
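                /* TB chaining in a nutshell: here next_tb still
                   identifies the block we just left, and its two low
                   bits select which of that block's direct-jump slots
                   to patch.  tb_add_jump() rewrites that slot to
                   branch straight into 'tb', so the next execution
                   bypasses this lookup entirely.  Two-page TBs are
                   left unchained because the second page's mapping
                   may change behind our back. */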
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (!unlikely (env->exit_request)) {
                    env->current_tb = tb;
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
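                    /* What follows is the icount bookkeeping, as this
                       file uses it (a sketch): the generated code
                       decrements the 16-bit counter
                       env->icount_decr.u16.low; any budget beyond
                       0xffff instructions is parked in
                       env->icount_extra and fed into the decrementer
                       in refill steps below.  A negative u32 reading
                       indicates the high half was set to force an
                       early exit. */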
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif
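
/* Why EXCEPTION_ACTION differs (a reading of this file, not a
   documented contract): on i386 a guest-visible fault is re-raised
   through raise_exception_err() so the usual exception delivery path
   (error code handling, vector dispatch) runs; on the other targets
   the fault state is already recorded in env, so simply unwinding to
   cpu_exec() with cpu_loop_exit() is enough. */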

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
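
/* Each host-specific cpu_signal_handler() below follows the same
   recipe: dig the faulting host PC, the faulting guest address, and a
   best-effort is_write flag out of the (host-dependent) signal
   context, then forward them to handle_cpu_signal() together with the
   saved signal mask.  A minimal sketch for a hypothetical host whose
   ucontext exposes the PC directly:

       int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
       {
           siginfo_t *info = pinfo;
           struct ucontext *uc = puc;    // layout is host-specific
           unsigned long pc = 0;         // read from uc->uc_mcontext here
           int is_write = 0;             // 1 only if provably a write fault
           return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                    is_write, &uc->uc_sigmask, puc);
       }
*/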

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__ || __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
/* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */