1 /*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #include "exec.h"
21 #include "disas.h"
22 #include "tcg.h"
23 #include "kvm.h"
24
25 #if !defined(CONFIG_SOFTMMU)
26 #undef EAX
27 #undef ECX
28 #undef EDX
29 #undef EBX
30 #undef ESP
31 #undef EBP
32 #undef ESI
33 #undef EDI
34 #undef EIP
35 #include <signal.h>
36 #ifdef __linux__
37 #include <sys/ucontext.h>
38 #endif
39 #endif
40
41 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
42 // Work around ugly bugs in glibc that mangle global register contents
43 #undef env
44 #define env cpu_single_env
45 #endif
46
47 int tb_invalidated_flag;
48
49 //#define CONFIG_DEBUG_EXEC
50 //#define DEBUG_SIGNAL
51
52 int qemu_cpu_has_work(CPUState *env)
53 {
54 return cpu_has_work(env);
55 }
56
57 void cpu_loop_exit(void)
58 {
59 /* NOTE: the registers at this point must be saved by hand because
60 longjmp restores them */
61 regs_to_env();
62 longjmp(env->jmp_env, 1);
63 }
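/* A minimal, self-contained sketch of the setjmp/longjmp idiom used here:
   cpu_loop_exit() pairs with the setjmp() on env->jmp_env in cpu_exec()
   below. Names are hypothetical and the block is compiled out; it only
   illustrates the control flow. */
#if 0
#include <setjmp.h>

static jmp_buf demo_jmp_env;              /* analogue of env->jmp_env */

static void demo_loop_exit(void)          /* analogue of cpu_loop_exit() */
{
    longjmp(demo_jmp_env, 1);             /* unwind back to the main loop */
}

static int demo_run(void)
{
    if (setjmp(demo_jmp_env) == 0) {
        /* execute guest code; any exception path calls demo_loop_exit() */
        demo_loop_exit();
        return 0;                         /* never reached */
    }
    return 1;                             /* resumed here after longjmp */
}
#endif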
64
65 /* exit the current TB from a signal handler. The host registers are
66 restored in a state compatible with the CPU emulator
67 */
68 void cpu_resume_from_signal(CPUState *env1, void *puc)
69 {
70 #if !defined(CONFIG_SOFTMMU)
71 #ifdef __linux__
72 struct ucontext *uc = puc;
73 #elif defined(__OpenBSD__)
74 struct sigcontext *uc = puc;
75 #endif
76 #endif
77
78 env = env1;
79
80 /* XXX: restore cpu registers saved in host registers */
81
82 #if !defined(CONFIG_SOFTMMU)
83 if (puc) {
84 /* XXX: use siglongjmp ? */
85 #ifdef __linux__
86 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
87 #elif defined(__OpenBSD__)
88 sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
89 #endif
90 }
91 #endif
92 env->exception_index = -1;
93 longjmp(env->jmp_env, 1);
94 }
95
96 /* Execute the code without caching the generated code. An interpreter
97 could be used if available. */
98 static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
99 {
100 unsigned long next_tb;
101 TranslationBlock *tb;
102
103 /* Should never happen.
104 We only end up here when an existing TB is too long. */
105 if (max_cycles > CF_COUNT_MASK)
106 max_cycles = CF_COUNT_MASK;
107
108 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
109 max_cycles);
110 env->current_tb = tb;
111 /* execute the generated code */
112 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
113
114 if ((next_tb & 3) == 2) {
115 /* Restore PC. This may happen if an async event occurs before
116 the TB starts executing. */
117 cpu_pc_from_tb(env, tb);
118 }
119 tb_phys_invalidate(tb, -1);
120 tb_free(tb);
121 }
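/* tcg_qemu_tb_exec() returns the next TB pointer with its two low bits
   used as a tag; tag 2 (tested above) means the instruction counter
   expired before the TB body ran. A sketch of the tagging scheme,
   assuming TB pointers are at least 4-byte aligned (hypothetical helpers,
   compiled out): */
#if 0
static unsigned long demo_encode_exit(TranslationBlock *tb, int tag)
{
    return (unsigned long)tb | (tag & 3);        /* tag in alignment bits */
}

static TranslationBlock *demo_decode_exit(unsigned long next_tb, int *tag)
{
    *tag = next_tb & 3;                          /* low bits: exit reason */
    return (TranslationBlock *)(next_tb & ~3UL); /* high bits: TB pointer */
}
#endif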
122
123 static TranslationBlock *tb_find_slow(target_ulong pc,
124 target_ulong cs_base,
125 uint64_t flags)
126 {
127 TranslationBlock *tb, **ptb1;
128 unsigned int h;
129 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
130
131 tb_invalidated_flag = 0;
132
133 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
134
135 /* find translated block using physical mappings */
136 phys_pc = get_phys_addr_code(env, pc);
137 phys_page1 = phys_pc & TARGET_PAGE_MASK;
138 phys_page2 = -1;
139 h = tb_phys_hash_func(phys_pc);
140 ptb1 = &tb_phys_hash[h];
141 for(;;) {
142 tb = *ptb1;
143 if (!tb)
144 goto not_found;
145 if (tb->pc == pc &&
146 tb->page_addr[0] == phys_page1 &&
147 tb->cs_base == cs_base &&
148 tb->flags == flags) {
149 /* check next page if needed */
150 if (tb->page_addr[1] != -1) {
151 virt_page2 = (pc & TARGET_PAGE_MASK) +
152 TARGET_PAGE_SIZE;
153 phys_page2 = get_phys_addr_code(env, virt_page2);
154 if (tb->page_addr[1] == phys_page2)
155 goto found;
156 } else {
157 goto found;
158 }
159 }
160 ptb1 = &tb->phys_hash_next;
161 }
162 not_found:
163 /* if no translated code available, then translate it now */
164 tb = tb_gen_code(env, pc, cs_base, flags, 0);
165
166 found:
167 /* we add the TB in the virtual pc hash table */
168 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
169 return tb;
170 }
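/* tb_find_slow() walks an externally chained hash table keyed by the
   physical PC and falls back to tb_gen_code() on a miss. The
   lookup-or-insert shape in miniature (hypothetical types, compiled out;
   in the real code the chain insertion happens inside tb_gen_code()): */
#if 0
struct demo_node { unsigned long key; struct demo_node *next; };

static struct demo_node *demo_lookup_or_insert(struct demo_node **bucket,
                                               unsigned long key,
                                               struct demo_node *(*make)(unsigned long))
{
    struct demo_node *n;
    for (n = *bucket; n != NULL; n = n->next) {
        if (n->key == key)
            return n;                 /* hit somewhere on the chain */
    }
    n = make(key);                    /* analogue of tb_gen_code() */
    n->next = *bucket;                /* link at the head of the chain */
    *bucket = n;
    return n;
}
#endif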
171
172 static inline TranslationBlock *tb_find_fast(void)
173 {
174 TranslationBlock *tb;
175 target_ulong cs_base, pc;
176 int flags;
177
178 /* we record a subset of the CPU state. It will
179 always be the same before a given translated block
180 is executed. */
181 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
182 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
183 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
184 tb->flags != flags)) {
185 tb = tb_find_slow(pc, cs_base, flags);
186 }
187 return tb;
188 }
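/* tb_jmp_cache is a direct-mapped cache indexed by the virtual PC and
   placed in front of the physical hash table: one slot per hash value,
   overwritten on conflict, validated by re-checking pc (the real code
   also re-checks cs_base and flags). The same structure in miniature
   (hypothetical names, compiled out): */
#if 0
#define DEMO_CACHE_BITS 12
#define DEMO_CACHE_SIZE (1 << DEMO_CACHE_BITS)

static TranslationBlock *demo_jmp_cache[DEMO_CACHE_SIZE];

static TranslationBlock *demo_cache_find(target_ulong pc)
{
    unsigned int idx = (pc >> 2) & (DEMO_CACHE_SIZE - 1); /* cheap PC hash */
    TranslationBlock *tb = demo_jmp_cache[idx];

    if (tb && tb->pc == pc)
        return tb;                    /* fast hit, no chain walk */
    tb = tb_find_slow(pc, 0, 0);      /* slow path refills the slot */
    demo_jmp_cache[idx] = tb;
    return tb;
}
#endif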
189
190 static CPUDebugExcpHandler *debug_excp_handler;
191
192 CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
193 {
194 CPUDebugExcpHandler *old_handler = debug_excp_handler;
195
196 debug_excp_handler = handler;
197 return old_handler;
198 }
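/* Because the setter returns the previous handler, a client can chain a
   new debug-exception hook instead of replacing the old one. A sketch of
   that usage (hypothetical handler, compiled out): */
#if 0
static CPUDebugExcpHandler *demo_prev_handler;

static void demo_debug_handler(CPUState *env)
{
    /* ... inspect env->watchpoint_hit, log state, etc. ... */
    if (demo_prev_handler)
        demo_prev_handler(env);       /* preserve the old behaviour */
}

static void demo_install_debug_handler(void)
{
    demo_prev_handler = cpu_set_debug_excp_handler(demo_debug_handler);
}
#endif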
199
200 static void cpu_handle_debug_exception(CPUState *env)
201 {
202 CPUWatchpoint *wp;
203
204 if (!env->watchpoint_hit)
205 QTAILQ_FOREACH(wp, &env->watchpoints, entry)
206 wp->flags &= ~BP_WATCHPOINT_HIT;
207
208 if (debug_excp_handler)
209 debug_excp_handler(env);
210 }
211
212 /* main execution loop */
213
214 int cpu_exec(CPUState *env1)
215 {
216 #define DECLARE_HOST_REGS 1
217 #include "hostregs_helper.h"
218 int ret, interrupt_request;
219 TranslationBlock *tb;
220 uint8_t *tc_ptr;
221 unsigned long next_tb;
222
223 if (cpu_halted(env1) == EXCP_HALTED)
224 return EXCP_HALTED;
225
226 cpu_single_env = env1;
227
228 /* first we save global registers */
229 #define SAVE_HOST_REGS 1
230 #include "hostregs_helper.h"
231 env = env1;
232
233 env_to_regs();
234 #if defined(TARGET_I386)
235 if (!kvm_enabled()) {
236 /* put eflags in CPU temporary format */
237 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
238 DF = 1 - (2 * ((env->eflags >> 10) & 1));
239 CC_OP = CC_OP_EFLAGS;
240 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
241 }
242 #elif defined(TARGET_SPARC)
243 #elif defined(TARGET_M68K)
244 env->cc_op = CC_OP_FLAGS;
245 env->cc_dest = env->sr & 0xf;
246 env->cc_x = (env->sr >> 4) & 1;
247 #elif defined(TARGET_ALPHA)
248 #elif defined(TARGET_ARM)
249 #elif defined(TARGET_PPC)
250 #elif defined(TARGET_MICROBLAZE)
251 #elif defined(TARGET_MIPS)
252 #elif defined(TARGET_SH4)
253 #elif defined(TARGET_CRIS)
254 #elif defined(TARGET_S390X)
255 /* XXXXX */
256 #else
257 #error unsupported target CPU
258 #endif
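/* The i386/m68k setup above converts the architectural flags into QEMU's
   lazy format: rather than computing EFLAGS after every instruction, the
   translated code records which operation produced the flags (CC_OP) plus
   its operands (CC_SRC/CC_DST), and the flags are materialized only on
   demand. The idea in miniature (hypothetical names, compiled out): */
#if 0
enum { DEMO_CC_OP_FLAGS, DEMO_CC_OP_ADD };  /* which op defined the flags */

static int demo_cc_op, demo_cc_src, demo_cc_dst;

static int demo_compute_zf(void)
{
    switch (demo_cc_op) {
    case DEMO_CC_OP_ADD:               /* flags described by the last add */
        return demo_cc_dst == 0;       /* ZF recomputed from the result */
    default:                           /* flags already in canonical form */
        return (demo_cc_src >> 6) & 1; /* ZF is bit 6 of EFLAGS */
    }
}
#endif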
259 env->exception_index = -1;
260
261 /* prepare setjmp context for exception handling */
262 for(;;) {
263 if (setjmp(env->jmp_env) == 0) {
264 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
265 #undef env
266 env = cpu_single_env;
267 #define env cpu_single_env
268 #endif
269 env->current_tb = NULL;
270 /* if an exception is pending, we execute it here */
271 if (env->exception_index >= 0) {
272 if (env->exception_index >= EXCP_INTERRUPT) {
273 /* exit request from the cpu execution loop */
274 ret = env->exception_index;
275 if (ret == EXCP_DEBUG)
276 cpu_handle_debug_exception(env);
277 break;
278 } else {
279 #if defined(CONFIG_USER_ONLY)
280 /* when compiled for user mode only, we simulate a
281 fake exception which is handled outside the cpu
282 execution loop */
283 #if defined(TARGET_I386)
284 do_interrupt_user(env->exception_index,
285 env->exception_is_int,
286 env->error_code,
287 env->exception_next_eip);
288 /* successfully delivered */
289 env->old_exception = -1;
290 #endif
291 ret = env->exception_index;
292 break;
293 #else
294 #if defined(TARGET_I386)
295 /* simulate a real cpu exception. On i386, it can
296 trigger new exceptions, but we do not handle
297 double or triple faults yet. */
298 do_interrupt(env->exception_index,
299 env->exception_is_int,
300 env->error_code,
301 env->exception_next_eip, 0);
302 /* successfully delivered */
303 env->old_exception = -1;
304 #elif defined(TARGET_PPC)
305 do_interrupt(env);
306 #elif defined(TARGET_MICROBLAZE)
307 do_interrupt(env);
308 #elif defined(TARGET_MIPS)
309 do_interrupt(env);
310 #elif defined(TARGET_SPARC)
311 do_interrupt(env);
312 #elif defined(TARGET_ARM)
313 do_interrupt(env);
314 #elif defined(TARGET_SH4)
315 do_interrupt(env);
316 #elif defined(TARGET_ALPHA)
317 do_interrupt(env);
318 #elif defined(TARGET_CRIS)
319 do_interrupt(env);
320 #elif defined(TARGET_M68K)
321 do_interrupt(0);
322 #endif
323 #endif
324 }
325 env->exception_index = -1;
326 }
327
328 if (kvm_enabled()) {
329 kvm_cpu_exec(env);
330 longjmp(env->jmp_env, 1);
331 }
332
333 next_tb = 0; /* force lookup of first TB */
334 for(;;) {
335 interrupt_request = env->interrupt_request;
336 if (unlikely(interrupt_request)) {
337 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
338 /* Mask out external interrupts for this step. */
339 interrupt_request &= ~(CPU_INTERRUPT_HARD |
340 CPU_INTERRUPT_FIQ |
341 CPU_INTERRUPT_SMI |
342 CPU_INTERRUPT_NMI);
343 }
344 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
345 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
346 env->exception_index = EXCP_DEBUG;
347 cpu_loop_exit();
348 }
349 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
350 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
351 defined(TARGET_MICROBLAZE)
352 if (interrupt_request & CPU_INTERRUPT_HALT) {
353 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
354 env->halted = 1;
355 env->exception_index = EXCP_HLT;
356 cpu_loop_exit();
357 }
358 #endif
359 #if defined(TARGET_I386)
360 if (interrupt_request & CPU_INTERRUPT_INIT) {
361 svm_check_intercept(SVM_EXIT_INIT);
362 do_cpu_init(env);
363 env->exception_index = EXCP_HALTED;
364 cpu_loop_exit();
365 } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
366 do_cpu_sipi(env);
367 } else if (env->hflags2 & HF2_GIF_MASK) {
368 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
369 !(env->hflags & HF_SMM_MASK)) {
370 svm_check_intercept(SVM_EXIT_SMI);
371 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
372 do_smm_enter();
373 next_tb = 0;
374 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
375 !(env->hflags2 & HF2_NMI_MASK)) {
376 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
377 env->hflags2 |= HF2_NMI_MASK;
378 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
379 next_tb = 0;
380 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
381 env->interrupt_request &= ~CPU_INTERRUPT_MCE;
382 do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
383 next_tb = 0;
384 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
385 (((env->hflags2 & HF2_VINTR_MASK) &&
386 (env->hflags2 & HF2_HIF_MASK)) ||
387 (!(env->hflags2 & HF2_VINTR_MASK) &&
388 (env->eflags & IF_MASK &&
389 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
390 int intno;
391 svm_check_intercept(SVM_EXIT_INTR);
392 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
393 intno = cpu_get_pic_interrupt(env);
394 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
395 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
396 #undef env
397 env = cpu_single_env;
398 #define env cpu_single_env
399 #endif
400 do_interrupt(intno, 0, 0, 0, 1);
401 /* ensure that no TB jump will be modified as
402 the program flow was changed */
403 next_tb = 0;
404 #if !defined(CONFIG_USER_ONLY)
405 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
406 (env->eflags & IF_MASK) &&
407 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
408 int intno;
409 /* FIXME: this should respect TPR */
410 svm_check_intercept(SVM_EXIT_VINTR);
411 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
412 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
413 do_interrupt(intno, 0, 0, 0, 1);
414 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
415 next_tb = 0;
416 #endif
417 }
418 }
419 #elif defined(TARGET_PPC)
420 #if 0
421 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
422 cpu_reset(env);
423 }
424 #endif
425 if (interrupt_request & CPU_INTERRUPT_HARD) {
426 ppc_hw_interrupt(env);
427 if (env->pending_interrupts == 0)
428 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
429 next_tb = 0;
430 }
431 #elif defined(TARGET_MICROBLAZE)
432 if ((interrupt_request & CPU_INTERRUPT_HARD)
433 && (env->sregs[SR_MSR] & MSR_IE)
434 && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
435 && !(env->iflags & (D_FLAG | IMM_FLAG))) {
436 env->exception_index = EXCP_IRQ;
437 do_interrupt(env);
438 next_tb = 0;
439 }
440 #elif defined(TARGET_MIPS)
441 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
442 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
443 (env->CP0_Status & (1 << CP0St_IE)) &&
444 !(env->CP0_Status & (1 << CP0St_EXL)) &&
445 !(env->CP0_Status & (1 << CP0St_ERL)) &&
446 !(env->hflags & MIPS_HFLAG_DM)) {
447 /* Raise it */
448 env->exception_index = EXCP_EXT_INTERRUPT;
449 env->error_code = 0;
450 do_interrupt(env);
451 next_tb = 0;
452 }
453 #elif defined(TARGET_SPARC)
454 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
455 cpu_interrupts_enabled(env)) {
456 int pil = env->interrupt_index & 15;
457 int type = env->interrupt_index & 0xf0;
458
459 if (((type == TT_EXTINT) &&
460 (pil == 15 || pil > env->psrpil)) ||
461 type != TT_EXTINT) {
462 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
463 env->exception_index = env->interrupt_index;
464 do_interrupt(env);
465 env->interrupt_index = 0;
466 next_tb = 0;
467 }
468 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
469 //do_interrupt(0, 0, 0, 0, 0);
470 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
471 }
472 #elif defined(TARGET_ARM)
473 if (interrupt_request & CPU_INTERRUPT_FIQ
474 && !(env->uncached_cpsr & CPSR_F)) {
475 env->exception_index = EXCP_FIQ;
476 do_interrupt(env);
477 next_tb = 0;
478 }
479 /* ARMv7-M interrupt return works by loading a magic value
480 into the PC. On real hardware the load causes the
481 return to occur. The qemu implementation performs the
482 jump normally, then does the exception return when the
483 CPU tries to execute code at the magic address.
484 This will cause the magic PC value to be pushed to
485 the stack if an interrupt occurred at the wrong time.
486 We avoid this by disabling interrupts when
487 pc contains a magic address. */
488 if (interrupt_request & CPU_INTERRUPT_HARD
489 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
490 || !(env->uncached_cpsr & CPSR_I))) {
491 env->exception_index = EXCP_IRQ;
492 do_interrupt(env);
493 next_tb = 0;
494 }
495 #elif defined(TARGET_SH4)
496 if (interrupt_request & CPU_INTERRUPT_HARD) {
497 do_interrupt(env);
498 next_tb = 0;
499 }
500 #elif defined(TARGET_ALPHA)
501 if (interrupt_request & CPU_INTERRUPT_HARD) {
502 do_interrupt(env);
503 next_tb = 0;
504 }
505 #elif defined(TARGET_CRIS)
506 if (interrupt_request & CPU_INTERRUPT_HARD
507 && (env->pregs[PR_CCS] & I_FLAG)) {
508 env->exception_index = EXCP_IRQ;
509 do_interrupt(env);
510 next_tb = 0;
511 }
512 if (interrupt_request & CPU_INTERRUPT_NMI
513 && (env->pregs[PR_CCS] & M_FLAG)) {
514 env->exception_index = EXCP_NMI;
515 do_interrupt(env);
516 next_tb = 0;
517 }
518 #elif defined(TARGET_M68K)
519 if (interrupt_request & CPU_INTERRUPT_HARD
520 && ((env->sr & SR_I) >> SR_I_SHIFT)
521 < env->pending_level) {
522 /* Real hardware gets the interrupt vector via an
523 IACK cycle at this point. Current emulated
524 hardware doesn't rely on this, so we
525 provide/save the vector when the interrupt is
526 first signalled. */
527 env->exception_index = env->pending_vector;
528 do_interrupt(1);
529 next_tb = 0;
530 }
531 #endif
532 /* Don't use the cached interrupt_request value,
533 do_interrupt may have updated the EXITTB flag. */
534 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
535 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
536 /* ensure that no TB jump will be modified as
537 the program flow was changed */
538 next_tb = 0;
539 }
540 }
541 if (unlikely(env->exit_request)) {
542 env->exit_request = 0;
543 env->exception_index = EXCP_INTERRUPT;
544 cpu_loop_exit();
545 }
546 #ifdef CONFIG_DEBUG_EXEC
547 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
548 /* restore flags in standard format */
549 regs_to_env();
550 #if defined(TARGET_I386)
551 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
552 log_cpu_state(env, X86_DUMP_CCOP);
553 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
554 #elif defined(TARGET_ARM)
555 log_cpu_state(env, 0);
556 #elif defined(TARGET_SPARC)
557 log_cpu_state(env, 0);
558 #elif defined(TARGET_PPC)
559 log_cpu_state(env, 0);
560 #elif defined(TARGET_M68K)
561 cpu_m68k_flush_flags(env, env->cc_op);
562 env->cc_op = CC_OP_FLAGS;
563 env->sr = (env->sr & 0xffe0)
564 | env->cc_dest | (env->cc_x << 4);
565 log_cpu_state(env, 0);
566 #elif defined(TARGET_MICROBLAZE)
567 log_cpu_state(env, 0);
568 #elif defined(TARGET_MIPS)
569 log_cpu_state(env, 0);
570 #elif defined(TARGET_SH4)
571 log_cpu_state(env, 0);
572 #elif defined(TARGET_ALPHA)
573 log_cpu_state(env, 0);
574 #elif defined(TARGET_CRIS)
575 log_cpu_state(env, 0);
576 #else
577 #error unsupported target CPU
578 #endif
579 }
580 #endif
581 spin_lock(&tb_lock);
582 tb = tb_find_fast();
583 /* Note: we do it here to avoid a gcc bug on Mac OS X when
584 doing it in tb_find_slow */
585 if (tb_invalidated_flag) {
586 /* as some TB could have been invalidated because
587 of memory exceptions while generating the code, we
588 must recompute the hash index here */
589 next_tb = 0;
590 tb_invalidated_flag = 0;
591 }
592 #ifdef CONFIG_DEBUG_EXEC
593 qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
594 (long)tb->tc_ptr, tb->pc,
595 lookup_symbol(tb->pc));
596 #endif
597 /* see if we can patch the calling TB. When the TB
598 spans two pages, we cannot safely do a direct
599 jump. */
600 {
601 if (next_tb != 0 && tb->page_addr[1] == -1) {
602 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
603 }
604 }
605 spin_unlock(&tb_lock);
606 env->current_tb = tb;
607
608 /* cpu_interrupt might be called while translating the
609 TB, but before it is linked into a potentially
610 infinite loop and becomes env->current_tb. Avoid
611 starting execution if there is a pending interrupt. */
612 if (unlikely (env->exit_request))
613 env->current_tb = NULL;
614
615 while (env->current_tb) {
616 tc_ptr = tb->tc_ptr;
617 /* execute the generated code */
618 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
619 #undef env
620 env = cpu_single_env;
621 #define env cpu_single_env
622 #endif
623 next_tb = tcg_qemu_tb_exec(tc_ptr);
624 env->current_tb = NULL;
625 if ((next_tb & 3) == 2) {
626 /* Instruction counter expired. */
627 int insns_left;
628 tb = (TranslationBlock *)(long)(next_tb & ~3);
629 /* Restore PC. */
630 cpu_pc_from_tb(env, tb);
631 insns_left = env->icount_decr.u32;
632 if (env->icount_extra && insns_left >= 0) {
633 /* Refill decrementer and continue execution. */
634 env->icount_extra += insns_left;
635 if (env->icount_extra > 0xffff) {
636 insns_left = 0xffff;
637 } else {
638 insns_left = env->icount_extra;
639 }
640 env->icount_extra -= insns_left;
641 env->icount_decr.u16.low = insns_left;
642 } else {
643 if (insns_left > 0) {
644 /* Execute remaining instructions. */
645 cpu_exec_nocache(insns_left, tb);
646 }
647 env->exception_index = EXCP_INTERRUPT;
648 next_tb = 0;
649 cpu_loop_exit();
650 }
651 }
652 }
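/* A note on the icount arithmetic above: icount_decr.u16.low is a 16-bit
   down-counter decremented by generated code, while icount_extra banks the
   instructions that do not fit in 16 bits. On expiry the residue is merged
   back and at most 0xffff instructions are re-armed. One refill step in
   isolation (hypothetical signature, compiled out): */
#if 0
static void demo_icount_refill(int *insns_left, int64_t *extra,
                               uint16_t *low)
{
    *extra += *insns_left;             /* merge residue into the bank */
    *insns_left = *extra > 0xffff ? 0xffff : (int)*extra;
    *extra -= *insns_left;             /* keep the remainder banked */
    *low = *insns_left;                /* re-arm the 16-bit decrementer */
}
#endif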
653 /* reset soft MMU for next block (it can currently
654 only be set by a memory fault) */
655 } /* for(;;) */
656 } else {
657 env_to_regs();
658 }
659 } /* for(;;) */
660
661
662 #if defined(TARGET_I386)
663 /* restore flags in standard format */
664 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
665 #elif defined(TARGET_ARM)
666 /* XXX: Save/restore host fpu exception state? */
667 #elif defined(TARGET_SPARC)
668 #elif defined(TARGET_PPC)
669 #elif defined(TARGET_M68K)
670 cpu_m68k_flush_flags(env, env->cc_op);
671 env->cc_op = CC_OP_FLAGS;
672 env->sr = (env->sr & 0xffe0)
673 | env->cc_dest | (env->cc_x << 4);
674 #elif defined(TARGET_MICROBLAZE)
675 #elif defined(TARGET_MIPS)
676 #elif defined(TARGET_SH4)
677 #elif defined(TARGET_ALPHA)
678 #elif defined(TARGET_CRIS)
679 #elif defined(TARGET_S390X)
680 /* XXXXX */
681 #else
682 #error unsupported target CPU
683 #endif
684
685 /* restore global registers */
686 #include "hostregs_helper.h"
687
688 /* fail safe: never use cpu_single_env outside cpu_exec() */
689 cpu_single_env = NULL;
690 return ret;
691 }
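/* The tb_add_jump() call in the loop above patches the exit jump of the
   previously executed TB to branch straight to the new one, so hot paths
   run block-to-block without returning here. Chaining is skipped for TBs
   spanning two pages (the second mapping can change independently), and
   every next_tb = 0 reset above deliberately breaks the chain after a
   control-flow change. The shape of the patch step (demo_patch_jump is
   hypothetical, compiled out): */
#if 0
static void demo_chain(TranslationBlock *prev, int exit_slot,
                       TranslationBlock *next)
{
    if (next->page_addr[1] == -1) {           /* single-page TBs only */
        demo_patch_jump(prev, exit_slot, next->tc_ptr);
    }
}
#endif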
692
693 /* must only be called from the generated code as an exception can be
694 generated */
695 void tb_invalidate_page_range(target_ulong start, target_ulong end)
696 {
697 /* XXX: cannot enable it yet because it triggers MMU exceptions
698 where NIP != read address on PowerPC */
699 #if 0
700 target_ulong phys_addr;
701 phys_addr = get_phys_addr_code(env, start);
702 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
703 #endif
704 }
705
706 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
707
708 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
709 {
710 CPUX86State *saved_env;
711
712 saved_env = env;
713 env = s;
714 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
715 selector &= 0xffff;
716 cpu_x86_load_seg_cache(env, seg_reg, selector,
717 (selector << 4), 0xffff, 0);
718 } else {
719 helper_load_seg(seg_reg, selector);
720 }
721 env = saved_env;
722 }
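/* In real mode (CR0.PE clear) and in VM86 mode a segment register is just
   a paragraph number: base = selector * 16, 64 KiB limit, no descriptor
   lookup; hence the (selector << 4) above. A worked example (compiled
   out): selector 0x1234 gives base 0x12340, so 0x1234:0x0010 is linear
   address 0x12350. */
#if 0
static unsigned long demo_real_mode_linear(unsigned short sel,
                                           unsigned short off)
{
    return ((unsigned long)sel << 4) + off;   /* base + offset */
}
#endif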
723
724 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
725 {
726 CPUX86State *saved_env;
727
728 saved_env = env;
729 env = s;
730
731 helper_fsave(ptr, data32);
732
733 env = saved_env;
734 }
735
736 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
737 {
738 CPUX86State *saved_env;
739
740 saved_env = env;
741 env = s;
742
743 helper_frstor(ptr, data32);
744
745 env = saved_env;
746 }
747
748 #endif /* TARGET_I386 */
749
750 #if !defined(CONFIG_SOFTMMU)
751
752 #if defined(TARGET_I386)
753 #define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
754 #else
755 #define EXCEPTION_ACTION cpu_loop_exit()
756 #endif
757
758 /* 'pc' is the host PC at which the exception was raised. 'address' is
759 the effective address of the memory exception. 'is_write' is 1 if a
760 write caused the exception, and 0 otherwise. 'old_set' is the
761 signal set which should be restored */
762 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
763 int is_write, sigset_t *old_set,
764 void *puc)
765 {
766 TranslationBlock *tb;
767 int ret;
768
769 if (cpu_single_env)
770 env = cpu_single_env; /* XXX: find a correct solution for multithread */
771 #if defined(DEBUG_SIGNAL)
772 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
773 pc, address, is_write, *(unsigned long *)old_set);
774 #endif
775 /* XXX: locking issue */
776 if (is_write && page_unprotect(h2g(address), pc, puc)) {
777 return 1;
778 }
779
780 /* see if it is an MMU fault */
781 ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
782 if (ret < 0)
783 return 0; /* not an MMU fault */
784 if (ret == 0)
785 return 1; /* the MMU fault was handled without causing real CPU fault */
786 /* now we have a real cpu fault */
787 tb = tb_find_pc(pc);
788 if (tb) {
789 /* the PC is inside the translated code. It means that we have
790 a virtual CPU fault */
791 cpu_restore_state(tb, env, pc, puc);
792 }
793
794 /* we restore the process signal mask as the sigreturn should
795 do it (XXX: use sigsetjmp) */
796 sigprocmask(SIG_SETMASK, old_set, NULL);
797 EXCEPTION_ACTION;
798
799 /* never reached */
800 return 1;
801 }
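/* handle_cpu_signal() is reached from the host-specific
   cpu_signal_handler() variants below, which user-mode emulation installs
   as SA_SIGINFO handlers. A sketch of that wiring under those assumptions
   (hypothetical wrapper, compiled out): */
#if 0
#include <signal.h>

static void demo_sigsegv(int sig, siginfo_t *info, void *puc)
{
    if (!cpu_signal_handler(sig, info, puc)) {
        /* not an emulated fault: restore default action and re-raise */
        signal(sig, SIG_DFL);
        raise(sig);
    }
}

static void demo_install(void)
{
    struct sigaction act;

    sigemptyset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;        /* deliver siginfo_t and ucontext */
    act.sa_sigaction = demo_sigsegv;
    sigaction(SIGSEGV, &act, NULL);
}
#endif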
802
803 #if defined(__i386__)
804
805 #if defined(__APPLE__)
806 # include <sys/ucontext.h>
807
808 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
809 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
810 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
811 # define MASK_sig(context) ((context)->uc_sigmask)
812 #elif defined (__NetBSD__)
813 # include <ucontext.h>
814
815 # define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
816 # define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
817 # define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
818 # define MASK_sig(context) ((context)->uc_sigmask)
819 #elif defined (__FreeBSD__) || defined(__DragonFly__)
820 # include <ucontext.h>
821
822 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
823 # define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
824 # define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
825 # define MASK_sig(context) ((context)->uc_sigmask)
826 #elif defined(__OpenBSD__)
827 # define EIP_sig(context) ((context)->sc_eip)
828 # define TRAP_sig(context) ((context)->sc_trapno)
829 # define ERROR_sig(context) ((context)->sc_err)
830 # define MASK_sig(context) ((context)->sc_mask)
831 #else
832 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
833 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
834 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
835 # define MASK_sig(context) ((context)->uc_sigmask)
836 #endif
837
838 int cpu_signal_handler(int host_signum, void *pinfo,
839 void *puc)
840 {
841 siginfo_t *info = pinfo;
842 #if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
843 ucontext_t *uc = puc;
844 #elif defined(__OpenBSD__)
845 struct sigcontext *uc = puc;
846 #else
847 struct ucontext *uc = puc;
848 #endif
849 unsigned long pc;
850 int trapno;
851
852 #ifndef REG_EIP
853 /* for glibc 2.1 */
854 #define REG_EIP EIP
855 #define REG_ERR ERR
856 #define REG_TRAPNO TRAPNO
857 #endif
858 pc = EIP_sig(uc);
859 trapno = TRAP_sig(uc);
860 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
861 trapno == 0xe ?
862 (ERROR_sig(uc) >> 1) & 1 : 0,
863 &MASK_sig(uc), puc);
864 }
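/* The is_write computation above decodes the x86 page-fault error code,
   which is only pushed for trap 0xe: bit 0 is P (0 = not-present, 1 =
   protection violation), bit 1 is W/R (0 = read, 1 = write), bit 2 is U/S
   (0 = supervisor, 1 = user). Extraction in isolation (compiled out): */
#if 0
static int demo_pf_is_write(unsigned long err)
{
    return (err >> 1) & 1;            /* bit 1: 1 means a write fault */
}
#endif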
865
866 #elif defined(__x86_64__)
867
868 #ifdef __NetBSD__
869 #define PC_sig(context) _UC_MACHINE_PC(context)
870 #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
871 #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
872 #define MASK_sig(context) ((context)->uc_sigmask)
873 #elif defined(__OpenBSD__)
874 #define PC_sig(context) ((context)->sc_rip)
875 #define TRAP_sig(context) ((context)->sc_trapno)
876 #define ERROR_sig(context) ((context)->sc_err)
877 #define MASK_sig(context) ((context)->sc_mask)
878 #elif defined (__FreeBSD__) || defined(__DragonFly__)
879 #include <ucontext.h>
880
881 #define PC_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
882 #define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
883 #define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
884 #define MASK_sig(context) ((context)->uc_sigmask)
885 #else
886 #define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
887 #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
888 #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
889 #define MASK_sig(context) ((context)->uc_sigmask)
890 #endif
891
892 int cpu_signal_handler(int host_signum, void *pinfo,
893 void *puc)
894 {
895 siginfo_t *info = pinfo;
896 unsigned long pc;
897 #if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
898 ucontext_t *uc = puc;
899 #elif defined(__OpenBSD__)
900 struct sigcontext *uc = puc;
901 #else
902 struct ucontext *uc = puc;
903 #endif
904
905 pc = PC_sig(uc);
906 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
907 TRAP_sig(uc) == 0xe ?
908 (ERROR_sig(uc) >> 1) & 1 : 0,
909 &MASK_sig(uc), puc);
910 }
911
912 #elif defined(_ARCH_PPC)
913
914 /***********************************************************************
915 * signal context platform-specific definitions
916 * From Wine
917 */
918 #ifdef linux
919 /* All Registers access - only for local access */
920 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
921 /* Gpr Registers access */
922 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
923 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
924 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
925 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
926 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
927 # define LR_sig(context) REG_sig(link, context) /* Link register */
928 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
929 /* Float Registers access */
930 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
931 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
932 /* Exception Registers access */
933 # define DAR_sig(context) REG_sig(dar, context)
934 # define DSISR_sig(context) REG_sig(dsisr, context)
935 # define TRAP_sig(context) REG_sig(trap, context)
936 #endif /* linux */
937
938 #ifdef __APPLE__
939 # include <sys/ucontext.h>
940 typedef struct ucontext SIGCONTEXT;
941 /* All Registers access - only for local access */
942 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
943 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
944 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
945 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
946 /* Gpr Registers access */
947 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
948 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
949 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
950 # define CTR_sig(context) REG_sig(ctr, context)
951 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
952 # define LR_sig(context) REG_sig(lr, context) /* Link register */
953 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
954 /* Float Registers access */
955 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
956 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
957 /* Exception Registers access */
958 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
959 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
960 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
961 #endif /* __APPLE__ */
962
963 int cpu_signal_handler(int host_signum, void *pinfo,
964 void *puc)
965 {
966 siginfo_t *info = pinfo;
967 struct ucontext *uc = puc;
968 unsigned long pc;
969 int is_write;
970
971 pc = IAR_sig(uc);
972 is_write = 0;
973 #if 0
974 /* ppc 4xx case */
975 if (DSISR_sig(uc) & 0x00800000)
976 is_write = 1;
977 #else
978 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
979 is_write = 1;
980 #endif
981 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
982 is_write, &uc->uc_sigmask, puc);
983 }
984
985 #elif defined(__alpha__)
986
987 int cpu_signal_handler(int host_signum, void *pinfo,
988 void *puc)
989 {
990 siginfo_t *info = pinfo;
991 struct ucontext *uc = puc;
992 uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
993 uint32_t insn = *pc;
994 int is_write = 0;
995
996 /* XXX: need kernel patch to get write flag faster */
997 switch (insn >> 26) {
998 case 0x0d: // stw
999 case 0x0e: // stb
1000 case 0x0f: // stq_u
1001 case 0x24: // stf
1002 case 0x25: // stg
1003 case 0x26: // sts
1004 case 0x27: // stt
1005 case 0x2c: // stl
1006 case 0x2d: // stq
1007 case 0x2e: // stl_c
1008 case 0x2f: // stq_c
1009 is_write = 1;
1010 }
1011
1012 return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
1013 is_write, &uc->uc_sigmask, puc);
1014 }
1015 #elif defined(__sparc__)
1016
1017 int cpu_signal_handler(int host_signum, void *pinfo,
1018 void *puc)
1019 {
1020 siginfo_t *info = pinfo;
1021 int is_write;
1022 uint32_t insn;
1023 #if !defined(__arch64__) || defined(CONFIG_SOLARIS)
1024 uint32_t *regs = (uint32_t *)(info + 1);
1025 void *sigmask = (regs + 20);
1026 /* XXX: is there a standard glibc define? */
1027 unsigned long pc = regs[1];
1028 #else
1029 #ifdef __linux__
1030 struct sigcontext *sc = puc;
1031 unsigned long pc = sc->sigc_regs.tpc;
1032 void *sigmask = (void *)sc->sigc_mask;
1033 #elif defined(__OpenBSD__)
1034 struct sigcontext *uc = puc;
1035 unsigned long pc = uc->sc_pc;
1036 void *sigmask = (void *)(long)uc->sc_mask;
1037 #endif
1038 #endif
1039
1040 /* XXX: need kernel patch to get write flag faster */
1041 is_write = 0;
1042 insn = *(uint32_t *)pc;
1043 if ((insn >> 30) == 3) {
1044 switch((insn >> 19) & 0x3f) {
1045 case 0x05: // stb
1046 case 0x15: // stba
1047 case 0x06: // sth
1048 case 0x16: // stha
1049 case 0x04: // st
1050 case 0x14: // sta
1051 case 0x07: // std
1052 case 0x17: // stda
1053 case 0x0e: // stx
1054 case 0x1e: // stxa
1055 case 0x24: // stf
1056 case 0x34: // stfa
1057 case 0x27: // stdf
1058 case 0x37: // stdfa
1059 case 0x26: // stqf
1060 case 0x36: // stqfa
1061 case 0x25: // stfsr
1062 case 0x3c: // casa
1063 case 0x3e: // casxa
1064 is_write = 1;
1065 break;
1066 }
1067 }
1068 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1069 is_write, sigmask, NULL);
1070 }
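/* The SPARC decode above relies on the fixed 32-bit instruction format:
   bits 31..30 (op) equal to 3 select the load/store group, and bits
   24..19 (op3) name the concrete opcode, so a store can be recognized
   from the faulting instruction alone. Field extraction in isolation
   (compiled out): */
#if 0
static int demo_sparc_fields(uint32_t insn, int *op3)
{
    *op3 = (insn >> 19) & 0x3f;       /* bits 24..19: load/store opcode */
    return insn >> 30;                /* bits 31..30: major format (op) */
}
#endif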
1071
1072 #elif defined(__arm__)
1073
1074 int cpu_signal_handler(int host_signum, void *pinfo,
1075 void *puc)
1076 {
1077 siginfo_t *info = pinfo;
1078 struct ucontext *uc = puc;
1079 unsigned long pc;
1080 int is_write;
1081
1082 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1083 pc = uc->uc_mcontext.gregs[R15];
1084 #else
1085 pc = uc->uc_mcontext.arm_pc;
1086 #endif
1087 /* XXX: compute is_write */
1088 is_write = 0;
1089 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1090 is_write,
1091 &uc->uc_sigmask, puc);
1092 }
1093
1094 #elif defined(__mc68000)
1095
1096 int cpu_signal_handler(int host_signum, void *pinfo,
1097 void *puc)
1098 {
1099 siginfo_t *info = pinfo;
1100 struct ucontext *uc = puc;
1101 unsigned long pc;
1102 int is_write;
1103
1104 pc = uc->uc_mcontext.gregs[16];
1105 /* XXX: compute is_write */
1106 is_write = 0;
1107 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1108 is_write,
1109 &uc->uc_sigmask, puc);
1110 }
1111
1112 #elif defined(__ia64)
1113
1114 #ifndef __ISR_VALID
1115 /* This ought to be in <bits/siginfo.h>... */
1116 # define __ISR_VALID 1
1117 #endif
1118
1119 int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1120 {
1121 siginfo_t *info = pinfo;
1122 struct ucontext *uc = puc;
1123 unsigned long ip;
1124 int is_write = 0;
1125
1126 ip = uc->uc_mcontext.sc_ip;
1127 switch (host_signum) {
1128 case SIGILL:
1129 case SIGFPE:
1130 case SIGSEGV:
1131 case SIGBUS:
1132 case SIGTRAP:
1133 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1134 /* ISR.W (write-access) is bit 33: */
1135 is_write = (info->si_isr >> 33) & 1;
1136 break;
1137
1138 default:
1139 break;
1140 }
1141 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1142 is_write,
1143 &uc->uc_sigmask, puc);
1144 }
1145
1146 #elif defined(__s390__)
1147
1148 int cpu_signal_handler(int host_signum, void *pinfo,
1149 void *puc)
1150 {
1151 siginfo_t *info = pinfo;
1152 struct ucontext *uc = puc;
1153 unsigned long pc;
1154 int is_write;
1155
1156 pc = uc->uc_mcontext.psw.addr;
1157 /* XXX: compute is_write */
1158 is_write = 0;
1159 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1160 is_write, &uc->uc_sigmask, puc);
1161 }
1162
1163 #elif defined(__mips__)
1164
1165 int cpu_signal_handler(int host_signum, void *pinfo,
1166 void *puc)
1167 {
1168 siginfo_t *info = pinfo;
1169 struct ucontext *uc = puc;
1170 greg_t pc = uc->uc_mcontext.pc;
1171 int is_write;
1172
1173 /* XXX: compute is_write */
1174 is_write = 0;
1175 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1176 is_write, &uc->uc_sigmask, puc);
1177 }
1178
1179 #elif defined(__hppa__)
1180
1181 int cpu_signal_handler(int host_signum, void *pinfo,
1182 void *puc)
1183 {
1184 struct siginfo *info = pinfo;
1185 struct ucontext *uc = puc;
1186 unsigned long pc;
1187 int is_write;
1188
1189 pc = uc->uc_mcontext.sc_iaoq[0];
1190 /* FIXME: compute is_write */
1191 is_write = 0;
1192 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1193 is_write,
1194 &uc->uc_sigmask, puc);
1195 }
1196
1197 #else
1198
1199 #error host CPU specific signal handler needed
1200
1201 #endif
1202
1203 #endif /* !defined(CONFIG_SOFTMMU) */