]> git.proxmox.com Git - mirror_qemu.git/blob - cpu-exec.c
i8259: add -no-spurious-interrupt-hack option
[mirror_qemu.git] / cpu-exec.c
1 /*
2 * emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #include "cpu.h"
21 #include "disas.h"
22 #include "tcg.h"
23 #include "qemu-barrier.h"
24 #include "qtest.h"
25
/* Set when a TranslationBlock may have been invalidated while code was
   being generated (e.g. by tb_gen_code); consumed in cpu_exec() to force
   a fresh TB lookup instead of chaining to a possibly-stale TB. */
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
29
30 bool qemu_cpu_has_work(CPUArchState *env)
31 {
32 return cpu_has_work(env);
33 }
34
/* Abort execution of the current translation block and jump back to the
 * setjmp() point inside cpu_exec().  Does not return. */
void cpu_loop_exit(CPUArchState *env)
{
    /* Clear current_tb before the non-local jump so no stale TB pointer
       is left behind once we are back in cpu_exec(). */
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
40
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    /* No exception pending: the jump back into cpu_exec() will simply
       resume the normal TB find/execute loop. */
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif
53
54 /* Execute the code without caching the generated code. An interpreter
55 could be used if available. */
56 static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
57 TranslationBlock *orig_tb)
58 {
59 tcg_target_ulong next_tb;
60 TranslationBlock *tb;
61
62 /* Should never happen.
63 We only end up here when an existing TB is too long. */
64 if (max_cycles > CF_COUNT_MASK)
65 max_cycles = CF_COUNT_MASK;
66
67 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
68 max_cycles);
69 env->current_tb = tb;
70 /* execute the generated code */
71 next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
72 env->current_tb = NULL;
73
74 if ((next_tb & 3) == 2) {
75 /* Restore PC. This may happen if async event occurs before
76 the TB starts executing. */
77 cpu_pc_from_tb(env, tb);
78 }
79 tb_phys_invalidate(tb, -1);
80 tb_free(tb);
81 }
82
/* Slow-path TB lookup: walk the physical-address hash chain for a TB
 * matching (pc, cs_base, flags), translating a new one if none exists.
 * On success the TB is moved to the head of its hash chain and entered
 * into the per-CPU virtual-PC jump cache.  Also clears
 * tb_invalidated_flag before translation so the caller can detect
 * invalidations that happened during tb_gen_code(). */
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed: a TB spanning two guest pages
               only matches if the second physical page also agrees */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list.
       NOTE(review): on the not_found path *ptb1 is NULL here, so the
       move is skipped — presumably tb_gen_code already linked the new
       TB into the hash chain itself. */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
138
139 static inline TranslationBlock *tb_find_fast(CPUArchState *env)
140 {
141 TranslationBlock *tb;
142 target_ulong cs_base, pc;
143 int flags;
144
145 /* we record a subset of the CPU state. It will
146 always be the same before a given translated block
147 is executed. */
148 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
149 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
150 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
151 tb->flags != flags)) {
152 tb = tb_find_slow(env, pc, cs_base, flags);
153 }
154 return tb;
155 }
156
/* Optional hook invoked from cpu_handle_debug_exception(); NULL when no
   debugger front end has registered one. */
static CPUDebugExcpHandler *debug_excp_handler;

/* Register @handler to be called on every debug exception.  Any
 * previously installed handler is silently replaced. */
void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
163
164 static void cpu_handle_debug_exception(CPUArchState *env)
165 {
166 CPUWatchpoint *wp;
167
168 if (!env->watchpoint_hit) {
169 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
170 wp->flags &= ~BP_WATCHPOINT_HIT;
171 }
172 }
173 if (debug_excp_handler) {
174 debug_excp_handler(env);
175 }
176 }
177
/* main execution loop */

/* Global request for every vCPU to leave its execution loop; the
   volatile sig_atomic_t type suggests it is written asynchronously
   (signal context) — latched into env->exit_request in cpu_exec(). */
volatile sig_atomic_t exit_request;
/* Main guest CPU execution loop.
 *
 * Repeatedly: service pending exceptions/interrupts, look up (or
 * translate) the next TranslationBlock, optionally chain it to the
 * previous one, and run it — until an exception index >= EXCP_INTERRUPT
 * forces a return.  Exceptions unwind back here via longjmp() to the
 * setjmp() context below.
 *
 * Returns the terminating exception index (EXCP_INTERRUPT, EXCP_HLT,
 * EXCP_DEBUG, ...), or EXCP_HALTED immediately if the CPU is halted
 * with no pending work.
 */
int cpu_exec(CPUArchState *env)
{
#ifdef TARGET_PPC
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    /* Latch a pending global exit request into this CPU. */
    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling: every
       cpu_loop_exit() longjmps back to this point */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            /* inner loop: run chained TBs until something interrupts us */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        /* interrupts only delivered while the global
                           interrupt flag (SVM GIF) is set */
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (intno >= 0) {
                                qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                              "Servicing hardware INT=0x%02x\n",
                                              intno);
                                do_interrupt_x86_hardirq(env, intno, 1);
                                /* ensure that no TB jump will be modified as
                                   the program flow was changed */
                                next_tb = 0;
                            }
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.
                               Hand out at most 0xffff instructions at a
                               time (the decrementer's low half). */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}