]> git.proxmox.com Git - qemu.git/blame - cpu-exec.c
rtl8139: support byte read to TxStatus registers
[qemu.git] / cpu-exec.c
CommitLineData
7d13299d 1/*
e965fc38 2 * emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0 16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
7d13299d 18 */
e4533c7a 19#include "config.h"
cea5f9a2 20#include "cpu.h"
956034d7 21#include "disas.h"
7cb69cae 22#include "tcg.h"
1d93f0f0 23#include "qemu-barrier.h"
7d13299d 24
/* Set by tb_gen_code() when existing TBs were invalidated while
   translating; cpu_exec() then refrains from chaining to a possibly
   stale next_tb.  Read and cleared under tb_lock in cpu_exec().  */
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
7d13299d 28
/* Public wrapper: report whether the CPU has outstanding work
   (e.g. pending interrupts), as decided by the per-target
   cpu_has_work() helper.  */
bool qemu_cpu_has_work(CPUArchState *env)
{
    return cpu_has_work(env);
}
33
/* Abandon execution of the current TB and jump back to the setjmp
   point armed in cpu_exec().  env->exception_index should already
   describe the reason for exiting.  Never returns.  */
void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
bfed01fc 39
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    /* NOTE(review): puc (host signal ucontext) is currently unused in
       the softmmu case.  Clear any pending exception and re-enter the
       main loop through the setjmp buffer armed in cpu_exec().  */
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif
fbf9eeb3 52
2e70f6ef
PB
53/* Execute the code without caching the generated code. An interpreter
54 could be used if available. */
9349b4f9 55static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
cea5f9a2 56 TranslationBlock *orig_tb)
2e70f6ef
PB
57{
58 unsigned long next_tb;
59 TranslationBlock *tb;
60
61 /* Should never happen.
62 We only end up here when an existing TB is too long. */
63 if (max_cycles > CF_COUNT_MASK)
64 max_cycles = CF_COUNT_MASK;
65
66 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
67 max_cycles);
68 env->current_tb = tb;
69 /* execute the generated code */
cea5f9a2 70 next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
1c3569fe 71 env->current_tb = NULL;
2e70f6ef
PB
72
73 if ((next_tb & 3) == 2) {
74 /* Restore PC. This may happen if async event occurs before
75 the TB starts executing. */
622ed360 76 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
77 }
78 tb_phys_invalidate(tb, -1);
79 tb_free(tb);
80}
81
/* Look up the TB for (pc, cs_base, flags) in the physical-PC hash
   table, translating a new one if none matches.  The found/created TB
   is moved to the head of its hash chain and installed in env's
   virtual-PC jump cache.  NOTE(review): appears to rely on the caller
   holding tb_lock (cpu_exec() takes it around tb_find_fast).  */
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                /* A TB spanning two guest pages only matches if the
                   second page maps to the same physical page too.  */
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        /* ptb1 still points at the link that referenced tb (or at the
           chain head for a freshly generated TB), so unlink + re-insert
           at the front is safe here.  */
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
137
9349b4f9 138static inline TranslationBlock *tb_find_fast(CPUArchState *env)
8a40a180
FB
139{
140 TranslationBlock *tb;
141 target_ulong cs_base, pc;
6b917547 142 int flags;
8a40a180
FB
143
144 /* we record a subset of the CPU state. It will
145 always be the same before a given translated block
146 is executed. */
6b917547 147 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
bce61846 148 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
149 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
150 tb->flags != flags)) {
cea5f9a2 151 tb = tb_find_slow(env, pc, cs_base, flags);
8a40a180
FB
152 }
153 return tb;
154}
155
1009d2ed
JK
156static CPUDebugExcpHandler *debug_excp_handler;
157
158CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
159{
160 CPUDebugExcpHandler *old_handler = debug_excp_handler;
161
162 debug_excp_handler = handler;
163 return old_handler;
164}
165
/* Post-process an EXCP_DEBUG exit: clear stale watchpoint-hit marks and
   invoke the registered debug exception hook, if any.  */
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    /* Only reset BP_WATCHPOINT_HIT flags when no watchpoint is
       currently reported as the cause of this debug exception.  */
    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
179
/* main execution loop */

/* Global request for the executing CPU to leave its inner loop; typed
   volatile sig_atomic_t so that setting it asynchronously (e.g. from a
   signal handler) is well defined.  cpu_exec() latches it into
   env->exit_request on entry.  */
volatile sig_atomic_t exit_request;
/* Main execution loop for one virtual CPU.  Repeatedly: service any
   pending exception, dispatch pending interrupts (per-target #ifdef
   blocks below), look up or translate the next TB, optionally chain it
   to the previous TB, and run it.  Exits via longjmp (cpu_loop_exit)
   back to the setjmp below, and returns the final exception index
   (e.g. EXCP_INTERRUPT, EXCP_HLT, EXCP_DEBUG) to the caller.  */
int cpu_exec(CPUArchState *env)
{
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    /* Latch a global exit request into this CPU's private flag.  */
    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        /* Interrupts are only taken while the SVM global
                           interrupt flag (GIF) is set.  */
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_state_reset(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}