]> git.proxmox.com Git - qemu.git/blame - cpu-exec.c
Version 1.0.1
[qemu.git] / cpu-exec.c
CommitLineData
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
e4533c7a 19#include "config.h"
cea5f9a2 20#include "cpu.h"
956034d7 21#include "disas.h"
7cb69cae 22#include "tcg.h"
1d93f0f0 23#include "qemu-barrier.h"
7d13299d 24
/* Set by tb_gen_code() when a TB had to be flushed while translating;
   tells the execution loop that cached TB pointers may be stale. */
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
f3e27037 29bool qemu_cpu_has_work(CPUState *env)
6a4955a8
AL
30{
31 return cpu_has_work(env);
32}
33
cea5f9a2 34void cpu_loop_exit(CPUState *env)
e4533c7a 35{
cea5f9a2
BS
36 env->current_tb = NULL;
37 longjmp(env->jmp_env, 1);
e4533c7a 38}
bfed01fc 39
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    /* No exception is pending; just re-enter the main loop. */
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif
fbf9eeb3 52
2e70f6ef
PB
53/* Execute the code without caching the generated code. An interpreter
54 could be used if available. */
cea5f9a2
BS
55static void cpu_exec_nocache(CPUState *env, int max_cycles,
56 TranslationBlock *orig_tb)
2e70f6ef
PB
57{
58 unsigned long next_tb;
59 TranslationBlock *tb;
60
61 /* Should never happen.
62 We only end up here when an existing TB is too long. */
63 if (max_cycles > CF_COUNT_MASK)
64 max_cycles = CF_COUNT_MASK;
65
66 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
67 max_cycles);
68 env->current_tb = tb;
69 /* execute the generated code */
cea5f9a2 70 next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
1c3569fe 71 env->current_tb = NULL;
2e70f6ef
PB
72
73 if ((next_tb & 3) == 2) {
74 /* Restore PC. This may happen if async event occurs before
75 the TB starts executing. */
622ed360 76 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
77 }
78 tb_phys_invalidate(tb, -1);
79 tb_free(tb);
80}
81
cea5f9a2
BS
82static TranslationBlock *tb_find_slow(CPUState *env,
83 target_ulong pc,
8a40a180 84 target_ulong cs_base,
c068688b 85 uint64_t flags)
8a40a180
FB
86{
87 TranslationBlock *tb, **ptb1;
8a40a180 88 unsigned int h;
337fc758 89 tb_page_addr_t phys_pc, phys_page1;
41c1b1c9 90 target_ulong virt_page2;
3b46e624 91
8a40a180 92 tb_invalidated_flag = 0;
3b46e624 93
8a40a180 94 /* find translated block using physical mappings */
41c1b1c9 95 phys_pc = get_page_addr_code(env, pc);
8a40a180 96 phys_page1 = phys_pc & TARGET_PAGE_MASK;
8a40a180
FB
97 h = tb_phys_hash_func(phys_pc);
98 ptb1 = &tb_phys_hash[h];
99 for(;;) {
100 tb = *ptb1;
101 if (!tb)
102 goto not_found;
5fafdf24 103 if (tb->pc == pc &&
8a40a180 104 tb->page_addr[0] == phys_page1 &&
5fafdf24 105 tb->cs_base == cs_base &&
8a40a180
FB
106 tb->flags == flags) {
107 /* check next page if needed */
108 if (tb->page_addr[1] != -1) {
337fc758
BS
109 tb_page_addr_t phys_page2;
110
5fafdf24 111 virt_page2 = (pc & TARGET_PAGE_MASK) +
8a40a180 112 TARGET_PAGE_SIZE;
41c1b1c9 113 phys_page2 = get_page_addr_code(env, virt_page2);
8a40a180
FB
114 if (tb->page_addr[1] == phys_page2)
115 goto found;
116 } else {
117 goto found;
118 }
119 }
120 ptb1 = &tb->phys_hash_next;
121 }
122 not_found:
2e70f6ef
PB
123 /* if no translated code available, then translate it now */
124 tb = tb_gen_code(env, pc, cs_base, flags, 0);
3b46e624 125
8a40a180 126 found:
2c90fe2b
KB
127 /* Move the last found TB to the head of the list */
128 if (likely(*ptb1)) {
129 *ptb1 = tb->phys_hash_next;
130 tb->phys_hash_next = tb_phys_hash[h];
131 tb_phys_hash[h] = tb;
132 }
8a40a180
FB
133 /* we add the TB in the virtual pc hash table */
134 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
8a40a180
FB
135 return tb;
136}
137
cea5f9a2 138static inline TranslationBlock *tb_find_fast(CPUState *env)
8a40a180
FB
139{
140 TranslationBlock *tb;
141 target_ulong cs_base, pc;
6b917547 142 int flags;
8a40a180
FB
143
144 /* we record a subset of the CPU state. It will
145 always be the same before a given translated block
146 is executed. */
6b917547 147 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
bce61846 148 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
149 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
150 tb->flags != flags)) {
cea5f9a2 151 tb = tb_find_slow(env, pc, cs_base, flags);
8a40a180
FB
152 }
153 return tb;
154}
155
1009d2ed
JK
156static CPUDebugExcpHandler *debug_excp_handler;
157
158CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
159{
160 CPUDebugExcpHandler *old_handler = debug_excp_handler;
161
162 debug_excp_handler = handler;
163 return old_handler;
164}
165
166static void cpu_handle_debug_exception(CPUState *env)
167{
168 CPUWatchpoint *wp;
169
170 if (!env->watchpoint_hit) {
171 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
172 wp->flags &= ~BP_WATCHPOINT_HIT;
173 }
174 }
175 if (debug_excp_handler) {
176 debug_excp_handler(env);
177 }
178}
179
7d13299d
FB
180/* main execution loop */
181
1a28cac3
MT
182volatile sig_atomic_t exit_request;
183
cea5f9a2 184int cpu_exec(CPUState *env)
7d13299d 185{
8a40a180 186 int ret, interrupt_request;
8a40a180 187 TranslationBlock *tb;
c27004ec 188 uint8_t *tc_ptr;
d5975363 189 unsigned long next_tb;
8c6939c0 190
cea5f9a2
BS
191 if (env->halted) {
192 if (!cpu_has_work(env)) {
eda48c34
PB
193 return EXCP_HALTED;
194 }
195
cea5f9a2 196 env->halted = 0;
eda48c34 197 }
5a1e3cfc 198
cea5f9a2 199 cpu_single_env = env;
e4533c7a 200
c629a4bc 201 if (unlikely(exit_request)) {
1a28cac3 202 env->exit_request = 1;
1a28cac3
MT
203 }
204
ecb644f4 205#if defined(TARGET_I386)
6792a57b
JK
206 /* put eflags in CPU temporary format */
207 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
208 DF = 1 - (2 * ((env->eflags >> 10) & 1));
209 CC_OP = CC_OP_EFLAGS;
210 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
93ac68bc 211#elif defined(TARGET_SPARC)
e6e5906b
PB
212#elif defined(TARGET_M68K)
213 env->cc_op = CC_OP_FLAGS;
214 env->cc_dest = env->sr & 0xf;
215 env->cc_x = (env->sr >> 4) & 1;
ecb644f4
TS
216#elif defined(TARGET_ALPHA)
217#elif defined(TARGET_ARM)
d2fbca94 218#elif defined(TARGET_UNICORE32)
ecb644f4 219#elif defined(TARGET_PPC)
4e85f82c 220 env->reserve_addr = -1;
81ea0e13 221#elif defined(TARGET_LM32)
b779e29e 222#elif defined(TARGET_MICROBLAZE)
6af0bf9c 223#elif defined(TARGET_MIPS)
fdf9b3e8 224#elif defined(TARGET_SH4)
f1ccf904 225#elif defined(TARGET_CRIS)
10ec5117 226#elif defined(TARGET_S390X)
2328826b 227#elif defined(TARGET_XTENSA)
fdf9b3e8 228 /* XXXXX */
e4533c7a
FB
229#else
230#error unsupported target CPU
231#endif
3fb2ded1 232 env->exception_index = -1;
9d27abd9 233
7d13299d 234 /* prepare setjmp context for exception handling */
3fb2ded1
FB
235 for(;;) {
236 if (setjmp(env->jmp_env) == 0) {
237 /* if an exception is pending, we execute it here */
238 if (env->exception_index >= 0) {
239 if (env->exception_index >= EXCP_INTERRUPT) {
240 /* exit request from the cpu execution loop */
241 ret = env->exception_index;
1009d2ed
JK
242 if (ret == EXCP_DEBUG) {
243 cpu_handle_debug_exception(env);
244 }
3fb2ded1 245 break;
72d239ed
AJ
246 } else {
247#if defined(CONFIG_USER_ONLY)
3fb2ded1 248 /* if user mode only, we simulate a fake exception
9f083493 249 which will be handled outside the cpu execution
3fb2ded1 250 loop */
83479e77 251#if defined(TARGET_I386)
e694d4e2 252 do_interrupt(env);
83479e77 253#endif
3fb2ded1
FB
254 ret = env->exception_index;
255 break;
72d239ed 256#else
b5ff1b31 257 do_interrupt(env);
301d2908 258 env->exception_index = -1;
83479e77 259#endif
3fb2ded1 260 }
5fafdf24 261 }
9df217a3 262
b5fc09ae 263 next_tb = 0; /* force lookup of first TB */
3fb2ded1 264 for(;;) {
68a79315 265 interrupt_request = env->interrupt_request;
e1638bd8 266 if (unlikely(interrupt_request)) {
267 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
268 /* Mask out external interrupts for this step. */
3125f763 269 interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
e1638bd8 270 }
6658ffb8
PB
271 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
272 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
273 env->exception_index = EXCP_DEBUG;
1162c041 274 cpu_loop_exit(env);
6658ffb8 275 }
a90b7318 276#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
b779e29e 277 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
d2fbca94 278 defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
a90b7318
AZ
279 if (interrupt_request & CPU_INTERRUPT_HALT) {
280 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
281 env->halted = 1;
282 env->exception_index = EXCP_HLT;
1162c041 283 cpu_loop_exit(env);
a90b7318
AZ
284 }
285#endif
68a79315 286#if defined(TARGET_I386)
b09ea7d5 287 if (interrupt_request & CPU_INTERRUPT_INIT) {
e694d4e2 288 svm_check_intercept(env, SVM_EXIT_INIT);
b09ea7d5
GN
289 do_cpu_init(env);
290 env->exception_index = EXCP_HALTED;
1162c041 291 cpu_loop_exit(env);
b09ea7d5
GN
292 } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
293 do_cpu_sipi(env);
294 } else if (env->hflags2 & HF2_GIF_MASK) {
db620f46
FB
295 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
296 !(env->hflags & HF_SMM_MASK)) {
e694d4e2 297 svm_check_intercept(env, SVM_EXIT_SMI);
db620f46 298 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
e694d4e2 299 do_smm_enter(env);
db620f46
FB
300 next_tb = 0;
301 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
302 !(env->hflags2 & HF2_NMI_MASK)) {
303 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
304 env->hflags2 |= HF2_NMI_MASK;
e694d4e2 305 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
db620f46 306 next_tb = 0;
79c4f6b0
HY
307 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
308 env->interrupt_request &= ~CPU_INTERRUPT_MCE;
e694d4e2 309 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
79c4f6b0 310 next_tb = 0;
db620f46
FB
311 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
312 (((env->hflags2 & HF2_VINTR_MASK) &&
313 (env->hflags2 & HF2_HIF_MASK)) ||
314 (!(env->hflags2 & HF2_VINTR_MASK) &&
315 (env->eflags & IF_MASK &&
316 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
317 int intno;
e694d4e2 318 svm_check_intercept(env, SVM_EXIT_INTR);
db620f46
FB
319 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
320 intno = cpu_get_pic_interrupt(env);
93fcfe39 321 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
e694d4e2 322 do_interrupt_x86_hardirq(env, intno, 1);
db620f46
FB
323 /* ensure that no TB jump will be modified as
324 the program flow was changed */
325 next_tb = 0;
0573fbfc 326#if !defined(CONFIG_USER_ONLY)
db620f46
FB
327 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
328 (env->eflags & IF_MASK) &&
329 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
330 int intno;
331 /* FIXME: this should respect TPR */
e694d4e2 332 svm_check_intercept(env, SVM_EXIT_VINTR);
db620f46 333 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
93fcfe39 334 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
e694d4e2 335 do_interrupt_x86_hardirq(env, intno, 1);
d40c54d6 336 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
db620f46 337 next_tb = 0;
907a5b26 338#endif
db620f46 339 }
68a79315 340 }
ce09776b 341#elif defined(TARGET_PPC)
9fddaa0c
FB
342#if 0
343 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
d84bda46 344 cpu_reset(env);
9fddaa0c
FB
345 }
346#endif
47103572 347 if (interrupt_request & CPU_INTERRUPT_HARD) {
e9df014c
JM
348 ppc_hw_interrupt(env);
349 if (env->pending_interrupts == 0)
350 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
b5fc09ae 351 next_tb = 0;
ce09776b 352 }
81ea0e13
MW
353#elif defined(TARGET_LM32)
354 if ((interrupt_request & CPU_INTERRUPT_HARD)
355 && (env->ie & IE_IE)) {
356 env->exception_index = EXCP_IRQ;
357 do_interrupt(env);
358 next_tb = 0;
359 }
b779e29e
EI
360#elif defined(TARGET_MICROBLAZE)
361 if ((interrupt_request & CPU_INTERRUPT_HARD)
362 && (env->sregs[SR_MSR] & MSR_IE)
363 && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
364 && !(env->iflags & (D_FLAG | IMM_FLAG))) {
365 env->exception_index = EXCP_IRQ;
366 do_interrupt(env);
367 next_tb = 0;
368 }
6af0bf9c
FB
369#elif defined(TARGET_MIPS)
370 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
4cdc1cd1 371 cpu_mips_hw_interrupts_pending(env)) {
6af0bf9c
FB
372 /* Raise it */
373 env->exception_index = EXCP_EXT_INTERRUPT;
374 env->error_code = 0;
375 do_interrupt(env);
b5fc09ae 376 next_tb = 0;
6af0bf9c 377 }
e95c8d51 378#elif defined(TARGET_SPARC)
d532b26c
IK
379 if (interrupt_request & CPU_INTERRUPT_HARD) {
380 if (cpu_interrupts_enabled(env) &&
381 env->interrupt_index > 0) {
382 int pil = env->interrupt_index & 0xf;
383 int type = env->interrupt_index & 0xf0;
384
385 if (((type == TT_EXTINT) &&
386 cpu_pil_allowed(env, pil)) ||
387 type != TT_EXTINT) {
388 env->exception_index = env->interrupt_index;
389 do_interrupt(env);
390 next_tb = 0;
391 }
392 }
a90b7318 393 }
b5ff1b31
FB
394#elif defined(TARGET_ARM)
395 if (interrupt_request & CPU_INTERRUPT_FIQ
396 && !(env->uncached_cpsr & CPSR_F)) {
397 env->exception_index = EXCP_FIQ;
398 do_interrupt(env);
b5fc09ae 399 next_tb = 0;
b5ff1b31 400 }
9ee6e8bb
PB
401 /* ARMv7-M interrupt return works by loading a magic value
402 into the PC. On real hardware the load causes the
403 return to occur. The qemu implementation performs the
404 jump normally, then does the exception return when the
405 CPU tries to execute code at the magic address.
406 This will cause the magic PC value to be pushed to
a1c7273b 407 the stack if an interrupt occurred at the wrong time.
9ee6e8bb
PB
408 We avoid this by disabling interrupts when
409 pc contains a magic address. */
b5ff1b31 410 if (interrupt_request & CPU_INTERRUPT_HARD
9ee6e8bb
PB
411 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
412 || !(env->uncached_cpsr & CPSR_I))) {
b5ff1b31
FB
413 env->exception_index = EXCP_IRQ;
414 do_interrupt(env);
b5fc09ae 415 next_tb = 0;
b5ff1b31 416 }
d2fbca94
GX
417#elif defined(TARGET_UNICORE32)
418 if (interrupt_request & CPU_INTERRUPT_HARD
419 && !(env->uncached_asr & ASR_I)) {
420 do_interrupt(env);
421 next_tb = 0;
422 }
fdf9b3e8 423#elif defined(TARGET_SH4)
e96e2044
TS
424 if (interrupt_request & CPU_INTERRUPT_HARD) {
425 do_interrupt(env);
b5fc09ae 426 next_tb = 0;
e96e2044 427 }
eddf68a6 428#elif defined(TARGET_ALPHA)
6a80e088
RH
429 {
430 int idx = -1;
431 /* ??? This hard-codes the OSF/1 interrupt levels. */
432 switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
433 case 0 ... 3:
434 if (interrupt_request & CPU_INTERRUPT_HARD) {
435 idx = EXCP_DEV_INTERRUPT;
436 }
437 /* FALLTHRU */
438 case 4:
439 if (interrupt_request & CPU_INTERRUPT_TIMER) {
440 idx = EXCP_CLK_INTERRUPT;
441 }
442 /* FALLTHRU */
443 case 5:
444 if (interrupt_request & CPU_INTERRUPT_SMP) {
445 idx = EXCP_SMP_INTERRUPT;
446 }
447 /* FALLTHRU */
448 case 6:
449 if (interrupt_request & CPU_INTERRUPT_MCHK) {
450 idx = EXCP_MCHK;
451 }
452 }
453 if (idx >= 0) {
454 env->exception_index = idx;
455 env->error_code = 0;
456 do_interrupt(env);
457 next_tb = 0;
458 }
eddf68a6 459 }
f1ccf904 460#elif defined(TARGET_CRIS)
1b1a38b0 461 if (interrupt_request & CPU_INTERRUPT_HARD
fb9fb692
EI
462 && (env->pregs[PR_CCS] & I_FLAG)
463 && !env->locked_irq) {
1b1a38b0
EI
464 env->exception_index = EXCP_IRQ;
465 do_interrupt(env);
466 next_tb = 0;
467 }
468 if (interrupt_request & CPU_INTERRUPT_NMI
469 && (env->pregs[PR_CCS] & M_FLAG)) {
470 env->exception_index = EXCP_NMI;
f1ccf904 471 do_interrupt(env);
b5fc09ae 472 next_tb = 0;
f1ccf904 473 }
0633879f
PB
474#elif defined(TARGET_M68K)
475 if (interrupt_request & CPU_INTERRUPT_HARD
476 && ((env->sr & SR_I) >> SR_I_SHIFT)
477 < env->pending_level) {
478 /* Real hardware gets the interrupt vector via an
479 IACK cycle at this point. Current emulated
480 hardware doesn't rely on this, so we
481 provide/save the vector when the interrupt is
482 first signalled. */
483 env->exception_index = env->pending_vector;
3c688828 484 do_interrupt_m68k_hardirq(env);
b5fc09ae 485 next_tb = 0;
0633879f 486 }
3110e292
AG
487#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
488 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
489 (env->psw.mask & PSW_MASK_EXT)) {
490 do_interrupt(env);
491 next_tb = 0;
492 }
40643d7c
MF
493#elif defined(TARGET_XTENSA)
494 if (interrupt_request & CPU_INTERRUPT_HARD) {
495 env->exception_index = EXC_IRQ;
496 do_interrupt(env);
497 next_tb = 0;
498 }
68a79315 499#endif
ff2712ba 500 /* Don't use the cached interrupt_request value,
9d05095e 501 do_interrupt may have updated the EXITTB flag. */
b5ff1b31 502 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
bf3e8bf1
FB
503 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
504 /* ensure that no TB jump will be modified as
505 the program flow was changed */
b5fc09ae 506 next_tb = 0;
bf3e8bf1 507 }
be214e6c
AJ
508 }
509 if (unlikely(env->exit_request)) {
510 env->exit_request = 0;
511 env->exception_index = EXCP_INTERRUPT;
1162c041 512 cpu_loop_exit(env);
3fb2ded1 513 }
a73b1fd9 514#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
8fec2b8c 515 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
3fb2ded1 516 /* restore flags in standard format */
ecb644f4 517#if defined(TARGET_I386)
e694d4e2
BS
518 env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
519 | (DF & DF_MASK);
93fcfe39 520 log_cpu_state(env, X86_DUMP_CCOP);
3fb2ded1 521 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
e6e5906b
PB
522#elif defined(TARGET_M68K)
523 cpu_m68k_flush_flags(env, env->cc_op);
524 env->cc_op = CC_OP_FLAGS;
525 env->sr = (env->sr & 0xffe0)
526 | env->cc_dest | (env->cc_x << 4);
93fcfe39 527 log_cpu_state(env, 0);
e4533c7a 528#else
a73b1fd9 529 log_cpu_state(env, 0);
e4533c7a 530#endif
3fb2ded1 531 }
a73b1fd9 532#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
d5975363 533 spin_lock(&tb_lock);
cea5f9a2 534 tb = tb_find_fast(env);
d5975363
PB
535 /* Note: we do it here to avoid a gcc bug on Mac OS X when
536 doing it in tb_find_slow */
537 if (tb_invalidated_flag) {
538 /* as some TB could have been invalidated because
539 of memory exceptions while generating the code, we
540 must recompute the hash index here */
541 next_tb = 0;
2e70f6ef 542 tb_invalidated_flag = 0;
d5975363 543 }
f0667e66 544#ifdef CONFIG_DEBUG_EXEC
93fcfe39
AL
545 qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
546 (long)tb->tc_ptr, tb->pc,
547 lookup_symbol(tb->pc));
9d27abd9 548#endif
8a40a180
FB
549 /* see if we can patch the calling TB. When the TB
550 spans two pages, we cannot safely do a direct
551 jump. */
040f2fb2 552 if (next_tb != 0 && tb->page_addr[1] == -1) {
b5fc09ae 553 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
3fb2ded1 554 }
d5975363 555 spin_unlock(&tb_lock);
55e8b85e 556
557 /* cpu_interrupt might be called while translating the
558 TB, but before it is linked into a potentially
559 infinite loop and becomes env->current_tb. Avoid
560 starting execution if there is a pending interrupt. */
b0052d15
JK
561 env->current_tb = tb;
562 barrier();
563 if (likely(!env->exit_request)) {
2e70f6ef 564 tc_ptr = tb->tc_ptr;
3fb2ded1 565 /* execute the generated code */
cea5f9a2 566 next_tb = tcg_qemu_tb_exec(env, tc_ptr);
2e70f6ef 567 if ((next_tb & 3) == 2) {
bf20dc07 568 /* Instruction counter expired. */
2e70f6ef
PB
569 int insns_left;
570 tb = (TranslationBlock *)(long)(next_tb & ~3);
571 /* Restore PC. */
622ed360 572 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
573 insns_left = env->icount_decr.u32;
574 if (env->icount_extra && insns_left >= 0) {
575 /* Refill decrementer and continue execution. */
576 env->icount_extra += insns_left;
577 if (env->icount_extra > 0xffff) {
578 insns_left = 0xffff;
579 } else {
580 insns_left = env->icount_extra;
581 }
582 env->icount_extra -= insns_left;
583 env->icount_decr.u16.low = insns_left;
584 } else {
585 if (insns_left > 0) {
586 /* Execute remaining instructions. */
cea5f9a2 587 cpu_exec_nocache(env, insns_left, tb);
2e70f6ef
PB
588 }
589 env->exception_index = EXCP_INTERRUPT;
590 next_tb = 0;
1162c041 591 cpu_loop_exit(env);
2e70f6ef
PB
592 }
593 }
594 }
b0052d15 595 env->current_tb = NULL;
4cbf74b6
FB
596 /* reset soft MMU for next block (it can currently
597 only be set by a memory fault) */
50a518e3 598 } /* for(;;) */
0d101938
JK
599 } else {
600 /* Reload env after longjmp - the compiler may have smashed all
601 * local variables as longjmp is marked 'noreturn'. */
602 env = cpu_single_env;
7d13299d 603 }
3fb2ded1
FB
604 } /* for(;;) */
605
7d13299d 606
e4533c7a 607#if defined(TARGET_I386)
9de5e440 608 /* restore flags in standard format */
e694d4e2
BS
609 env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
610 | (DF & DF_MASK);
e4533c7a 611#elif defined(TARGET_ARM)
b7bcbe95 612 /* XXX: Save/restore host fpu exception state?. */
d2fbca94 613#elif defined(TARGET_UNICORE32)
93ac68bc 614#elif defined(TARGET_SPARC)
67867308 615#elif defined(TARGET_PPC)
81ea0e13 616#elif defined(TARGET_LM32)
e6e5906b
PB
617#elif defined(TARGET_M68K)
618 cpu_m68k_flush_flags(env, env->cc_op);
619 env->cc_op = CC_OP_FLAGS;
620 env->sr = (env->sr & 0xffe0)
621 | env->cc_dest | (env->cc_x << 4);
b779e29e 622#elif defined(TARGET_MICROBLAZE)
6af0bf9c 623#elif defined(TARGET_MIPS)
fdf9b3e8 624#elif defined(TARGET_SH4)
eddf68a6 625#elif defined(TARGET_ALPHA)
f1ccf904 626#elif defined(TARGET_CRIS)
10ec5117 627#elif defined(TARGET_S390X)
2328826b 628#elif defined(TARGET_XTENSA)
fdf9b3e8 629 /* XXXXX */
e4533c7a
FB
630#else
631#error unsupported target CPU
632#endif
1057eaa7 633
6a00d601 634 /* fail safe : never use cpu_single_env outside cpu_exec() */
5fafdf24 635 cpu_single_env = NULL;
7d13299d
FB
636 return ret;
637}