/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping. */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
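    /* The value returned by tcg_qemu_tb_exec() is the address of the last
     * translated block executed, with the reason for leaving the code cache
     * encoded in its low bits (see TB_EXIT_MASK and the TB_EXIT_* values). */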
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
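    /* max_cycles was passed as the cflags argument above, so the cycle
     * budget ends up in the CF_COUNT_MASK field of the new TB. */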
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
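    /* TBs are indexed by the physical address of the code, so the lookup is
     * unaffected by changes in the virtual-to-physical mapping; walk this
     * bucket's collision chain looking for a match. */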
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
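    /* (pc, cs_base, flags) is the lookup key: flags holds the CPU mode bits
     * that affect how the code at pc gets translated. */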
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

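    /* If no watchpoint is recorded as having fired, this debug exception did
     * not come from a watchpoint hit, so clear any stale BP_WATCHPOINT_HIT
     * flags before invoking the handler. */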
    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

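/* Global flag set asynchronously (e.g. from a signal handler or another
 * thread) to request that cpu_exec() return; it is mirrored into
 * cpu->exit_request below. */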
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
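    /* df is kept as +1/-1 (the direction string instructions step in)
     * rather than as the raw DF bit of eflags. */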
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_env_get_cpu(env));
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels. */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
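                /* tb_lock protects the shared translation structures (the
                 * physical hash table and direct-jump patching) while we
                 * look up and chain TBs. */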
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
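                /* The low bits of next_tb select which of the two direct-jump
                 * slots in the calling TB gets patched (TB_EXIT_IDX0/IDX1). */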
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
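                /* Compiler barrier: make sure the store to cpu->current_tb is
                 * not reordered past the exit_request test below. */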
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired. */
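                        /* Translated code decrements icount_decr.u16.low and
                         * exits when the budget runs out; icount_extra holds
                         * the instructions still allowed beyond that 16-bit
                         * window. */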
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}