/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

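    /* If no specific watchpoint was recorded as hit, clear any stale
       BP_WATCHPOINT_HIT flags on the watchpoint list. */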
    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

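/* Set asynchronously (e.g. from a signal handler or another thread) to ask
   the vCPU currently running in this loop to exit. */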
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
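            /* Inner loop: find or translate the next TB and execute it,
               until an exception or exit request longjmps back above. */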
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
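                    /* INIT is handled first: re-initialise the CPU and leave
                       the execution loop; SIPI and the GIF-gated interrupts
                       are only considered after that. */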
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
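                    /* env->interrupt_index packs the priority level in its
                       low nibble and the trap type in the high nibble. */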
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
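                    /* External interrupts are only delivered while the PSW
                       external-interrupt mask bit is set. */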
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
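                    /* Any pending hard interrupt is raised as a generic IRQ
                       exception; the do_interrupt hook works out the rest. */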
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(env, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
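                /* The compiler barrier above keeps the current_tb store from
                   being reordered after the exit_request check below. */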
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}