/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

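/* Unwind back to the setjmp() armed at the top of the exception loop in
   cpu_exec().  Callers normally store a reason in env->exception_index (or -1)
   first; the longjmp() below never returns to them. */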
void cpu_loop_exit(CPUState *env1)
{
    env1->current_tb = NULL;
    longjmp(env1->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
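/* Note that exception_index is cleared before the longjmp(), so the main loop
   does not try to deliver an exception; it simply looks up a TB for the
   current guest PC and resumes execution from there. */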
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
    env = env1;

    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
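/* The icount path below relies on this when only part of a cached TB may
   still be executed: the block is regenerated with a hard cap on its cycle
   count, run exactly once, and then invalidated and freed so the capped
   version never lingers in the translation caches. */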
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

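/* TB lookup is two-level: tb_find_fast() first probes env->tb_jmp_cache, a
   direct-mapped cache indexed by a hash of the guest virtual PC; on a miss,
   tb_find_slow() searches the tb_phys_hash chain keyed by the code's physical
   address and, if nothing matches, translates a fresh block with
   tb_gen_code().  Whatever it finds is written back into the virtual-PC
   cache. */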
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */
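/* cpu_exec() is built from two nested loops: the outer one re-arms the
   setjmp() context and delivers any pending exception, while the inner one
   services interrupt requests, finds (or translates) the next TB, optionally
   chains it to the previous one, and runs it until something forces an exit
   back through longjmp(). */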

volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it. */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;
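    /* From here on the global 'env' (which on some hosts lives in a fixed
       host register) refers to the CPU being run; the previous value is kept
       in saved_env_reg and put back just before cpu_exec() returns, with
       barrier() keeping the compiler from reordering around the switch. */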

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
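    /* The arithmetic flags are kept lazily while translated code runs: with
       CC_OP set to CC_OP_EFLAGS, CC_SRC holds the flag bits directly, and DF
       is stored as +1/-1 so string instructions can add it to the index
       registers as-is.  The epilogue below folds everything back into
       env->eflags before cpu_exec() returns. */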
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
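            /* next_tb carries the address of the TB we just left, with the
               index of the jump slot that exited it in the low two bits;
               tb_add_jump() uses this further down to chain that block
               directly to the next one.  It is cleared whenever control flow
               changes in a way that would make such a direct jump invalid
               (the value 2 in the low bits is special-cased below for icount
               expiry). */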
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
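                    /* In particular the TB recorded in next_tb may itself
                       have been freed by a flush of the code buffer, so drop
                       it rather than let tb_add_jump() patch a jump into
                       stale code. */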
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
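                        /* With -icount, translated code decrements the 16-bit
                           counter in icount_decr.u16.low and exits the block
                           early, with the low bits of next_tb set to 2, once
                           it goes negative; icount_extra holds whatever part
                           of the budget did not fit into those 16 bits. */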
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}