/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

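/* Set when translated blocks may have been invalidated (e.g. by a
   translation-cache flush inside tb_gen_code()); cpu_exec() checks it
   before chaining TBs so that no stale block is patched. */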
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

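/* Unwind to the setjmp() in cpu_exec(); current_tb is cleared first so
   that no block is still considered to be executing. */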
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

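    /* The low two bits of next_tb carry exit status from the generated
       code; the value 2 marks an instruction-counter expiry, matching
       the decoding in cpu_exec() below. */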
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

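/* TBs are hashed by physical address so that a cached block stays valid
   across changes in the virtual-to-physical mapping. */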
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

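/* tb_jmp_cache is a direct-mapped cache keyed on the virtual PC; any
   mismatch in pc, cs_base or flags falls back to the physical-hash
   lookup in tb_find_slow(). */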
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

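/* Called for EXCP_DEBUG: clear stale watchpoint-hit flags unless a
   watchpoint is currently being reported, then hand off to any
   registered handler (typically installed by the gdbstub). */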
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
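                /* The low two bits of next_tb select which of the
                   previous TB's two jump slots gets patched. */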
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (!unlikely (env->exit_request)) {
                    env->current_tb = tb;
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
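                        /* The generated code counts icount_decr down;
                           icount_extra holds the part of the instruction
                           budget that does not fit in its 16-bit low
                           half. */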
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it can yield an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, 0 otherwise. 'old_set' is the
   signal set which should be restored. */
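/* Returns 1 when the fault was handled here (a guest fault, if any, is
   raised via EXCEPTION_ACTION and does not return); returning 0 tells
   the caller to leave the signal to the host's default handling. */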
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
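    /* Trap 0xe is the x86 page-fault vector; bit 1 of its error code is
       set for write accesses. */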
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context) /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context) /* Count register */
# define XER_sig(context)             REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context) /* Link register */
# define CR_sig(context)              REG_sig(ccr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context) /* Count register */
# define XER_sig(context)                 REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context) /* Link register */
# define CR_sig(context)                  REG_sig(cr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */