]> git.proxmox.com Git - mirror_qemu.git/blame - cpu-exec.c
mipsnet: use trace framework
[mirror_qemu.git] / cpu-exec.c
CommitLineData
7d13299d
FB
1/*
2 * i386 emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0 16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
7d13299d 18 */
e4533c7a 19#include "config.h"
cea5f9a2 20#include "cpu.h"
956034d7 21#include "disas.h"
7cb69cae 22#include "tcg.h"
1d93f0f0 23#include "qemu-barrier.h"
7d13299d 24
/* Set (by the translator) when TBs may have been invalidated while
   generating code; cpu_exec checks it to force a fresh TB lookup.
   Cleared by tb_find_slow before each physical-hash search. */
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
7d13299d 28
f3e27037 29bool qemu_cpu_has_work(CPUState *env)
6a4955a8
AL
30{
31 return cpu_has_work(env);
32}
33
cea5f9a2 34void cpu_loop_exit(CPUState *env)
e4533c7a 35{
cea5f9a2
BS
36 env->current_tb = NULL;
37 longjmp(env->jmp_env, 1);
e4533c7a 38}
bfed01fc 39
/* Exit the current TB from a signal handler.  The host registers are
   restored in a state compatible with the CPU emulator. */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    /* Drop any pending exception and jump straight back into the
       main execution loop in cpu_exec(). */
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif
fbf9eeb3 52
2e70f6ef
PB
53/* Execute the code without caching the generated code. An interpreter
54 could be used if available. */
cea5f9a2
BS
55static void cpu_exec_nocache(CPUState *env, int max_cycles,
56 TranslationBlock *orig_tb)
2e70f6ef
PB
57{
58 unsigned long next_tb;
59 TranslationBlock *tb;
60
61 /* Should never happen.
62 We only end up here when an existing TB is too long. */
63 if (max_cycles > CF_COUNT_MASK)
64 max_cycles = CF_COUNT_MASK;
65
66 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
67 max_cycles);
68 env->current_tb = tb;
69 /* execute the generated code */
cea5f9a2 70 next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
1c3569fe 71 env->current_tb = NULL;
2e70f6ef
PB
72
73 if ((next_tb & 3) == 2) {
74 /* Restore PC. This may happen if async event occurs before
75 the TB starts executing. */
622ed360 76 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
77 }
78 tb_phys_invalidate(tb, -1);
79 tb_free(tb);
80}
81
/* Slow-path TB lookup: search the physical-address hash table for a TB
   matching (pc, cs_base, flags), translating a new one if none exists.
   On success the TB is moved to the head of its hash chain (MRU) and
   installed in the virtual-pc jump cache.  Called with tb_lock held
   (taken by cpu_exec around tb_find_fast). */
static TranslationBlock *tb_find_slow(CPUState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        /* keep ptb1 pointing at the link that leads to tb, so the
           MRU move below can unlink tb in O(1) */
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list.
       (*ptb1 is NULL when we arrived via not_found: the freshly
       generated TB is already at the head, so nothing to move.) */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
137
cea5f9a2 138static inline TranslationBlock *tb_find_fast(CPUState *env)
8a40a180
FB
139{
140 TranslationBlock *tb;
141 target_ulong cs_base, pc;
6b917547 142 int flags;
8a40a180
FB
143
144 /* we record a subset of the CPU state. It will
145 always be the same before a given translated block
146 is executed. */
6b917547 147 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
bce61846 148 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
149 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
150 tb->flags != flags)) {
cea5f9a2 151 tb = tb_find_slow(env, pc, cs_base, flags);
8a40a180
FB
152 }
153 return tb;
154}
155
1009d2ed
JK
156static CPUDebugExcpHandler *debug_excp_handler;
157
158CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
159{
160 CPUDebugExcpHandler *old_handler = debug_excp_handler;
161
162 debug_excp_handler = handler;
163 return old_handler;
164}
165
166static void cpu_handle_debug_exception(CPUState *env)
167{
168 CPUWatchpoint *wp;
169
170 if (!env->watchpoint_hit) {
171 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
172 wp->flags &= ~BP_WATCHPOINT_HIT;
173 }
174 }
175 if (debug_excp_handler) {
176 debug_excp_handler(env);
177 }
178}
179
/* main execution loop */

/* Global exit flag, set asynchronously (e.g. from a signal handler);
   cpu_exec copies it into env->exit_request before entering the loop. */
volatile sig_atomic_t exit_request;

/* Main CPU execution loop: repeatedly look up (or translate) the TB
   for the current CPU state, chain TBs together where safe, and run
   the generated code, servicing pending interrupts and exceptions
   between TBs.  Returns EXCP_HALTED when the CPU is halted with no
   work, otherwise the exception index (>= EXCP_INTERRUPT) that caused
   the loop to exit. */
int cpu_exec(CPUState *env)
{
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env->halted) {
        if (!cpu_has_work(env)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}