/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"
36bdbe54
FB
25int tb_invalidated_flag;
26
f0667e66 27//#define CONFIG_DEBUG_EXEC
7d13299d 28
f3e27037 29bool qemu_cpu_has_work(CPUState *env)
6a4955a8
AL
30{
31 return cpu_has_work(env);
32}
33
cea5f9a2 34void cpu_loop_exit(CPUState *env)
e4533c7a 35{
cea5f9a2
BS
36 env->current_tb = NULL;
37 longjmp(env->jmp_env, 1);
e4533c7a 38}
bfed01fc 39
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif
fbf9eeb3 52
2e70f6ef
PB
53/* Execute the code without caching the generated code. An interpreter
54 could be used if available. */
cea5f9a2
BS
55static void cpu_exec_nocache(CPUState *env, int max_cycles,
56 TranslationBlock *orig_tb)
2e70f6ef
PB
57{
58 unsigned long next_tb;
59 TranslationBlock *tb;
60
61 /* Should never happen.
62 We only end up here when an existing TB is too long. */
63 if (max_cycles > CF_COUNT_MASK)
64 max_cycles = CF_COUNT_MASK;
65
66 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
67 max_cycles);
68 env->current_tb = tb;
69 /* execute the generated code */
cea5f9a2 70 next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
1c3569fe 71 env->current_tb = NULL;
2e70f6ef
PB
72
73 if ((next_tb & 3) == 2) {
74 /* Restore PC. This may happen if async event occurs before
75 the TB starts executing. */
622ed360 76 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
77 }
78 tb_phys_invalidate(tb, -1);
79 tb_free(tb);
80}
81
cea5f9a2
BS
82static TranslationBlock *tb_find_slow(CPUState *env,
83 target_ulong pc,
8a40a180 84 target_ulong cs_base,
c068688b 85 uint64_t flags)
8a40a180
FB
86{
87 TranslationBlock *tb, **ptb1;
8a40a180 88 unsigned int h;
41c1b1c9
PB
89 tb_page_addr_t phys_pc, phys_page1, phys_page2;
90 target_ulong virt_page2;
3b46e624 91
8a40a180 92 tb_invalidated_flag = 0;
3b46e624 93
8a40a180 94 /* find translated block using physical mappings */
41c1b1c9 95 phys_pc = get_page_addr_code(env, pc);
8a40a180
FB
96 phys_page1 = phys_pc & TARGET_PAGE_MASK;
97 phys_page2 = -1;
98 h = tb_phys_hash_func(phys_pc);
99 ptb1 = &tb_phys_hash[h];
100 for(;;) {
101 tb = *ptb1;
102 if (!tb)
103 goto not_found;
5fafdf24 104 if (tb->pc == pc &&
8a40a180 105 tb->page_addr[0] == phys_page1 &&
5fafdf24 106 tb->cs_base == cs_base &&
8a40a180
FB
107 tb->flags == flags) {
108 /* check next page if needed */
109 if (tb->page_addr[1] != -1) {
5fafdf24 110 virt_page2 = (pc & TARGET_PAGE_MASK) +
8a40a180 111 TARGET_PAGE_SIZE;
41c1b1c9 112 phys_page2 = get_page_addr_code(env, virt_page2);
8a40a180
FB
113 if (tb->page_addr[1] == phys_page2)
114 goto found;
115 } else {
116 goto found;
117 }
118 }
119 ptb1 = &tb->phys_hash_next;
120 }
121 not_found:
2e70f6ef
PB
122 /* if no translated code available, then translate it now */
123 tb = tb_gen_code(env, pc, cs_base, flags, 0);
3b46e624 124
8a40a180 125 found:
2c90fe2b
KB
126 /* Move the last found TB to the head of the list */
127 if (likely(*ptb1)) {
128 *ptb1 = tb->phys_hash_next;
129 tb->phys_hash_next = tb_phys_hash[h];
130 tb_phys_hash[h] = tb;
131 }
8a40a180
FB
132 /* we add the TB in the virtual pc hash table */
133 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
8a40a180
FB
134 return tb;
135}
136
cea5f9a2 137static inline TranslationBlock *tb_find_fast(CPUState *env)
8a40a180
FB
138{
139 TranslationBlock *tb;
140 target_ulong cs_base, pc;
6b917547 141 int flags;
8a40a180
FB
142
143 /* we record a subset of the CPU state. It will
144 always be the same before a given translated block
145 is executed. */
6b917547 146 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
bce61846 147 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
148 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
149 tb->flags != flags)) {
cea5f9a2 150 tb = tb_find_slow(env, pc, cs_base, flags);
8a40a180
FB
151 }
152 return tb;
153}
154
1009d2ed
JK
155static CPUDebugExcpHandler *debug_excp_handler;
156
157CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
158{
159 CPUDebugExcpHandler *old_handler = debug_excp_handler;
160
161 debug_excp_handler = handler;
162 return old_handler;
163}
164
165static void cpu_handle_debug_exception(CPUState *env)
166{
167 CPUWatchpoint *wp;
168
169 if (!env->watchpoint_hit) {
170 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
171 wp->flags &= ~BP_WATCHPOINT_HIT;
172 }
173 }
174 if (debug_excp_handler) {
175 debug_excp_handler(env);
176 }
177}
178
7d13299d
FB
179/* main execution loop */
180
1a28cac3
MT
181volatile sig_atomic_t exit_request;
182
cea5f9a2 183int cpu_exec(CPUState *env)
7d13299d 184{
8a40a180 185 int ret, interrupt_request;
8a40a180 186 TranslationBlock *tb;
c27004ec 187 uint8_t *tc_ptr;
d5975363 188 unsigned long next_tb;
8c6939c0 189
cea5f9a2
BS
190 if (env->halted) {
191 if (!cpu_has_work(env)) {
eda48c34
PB
192 return EXCP_HALTED;
193 }
194
cea5f9a2 195 env->halted = 0;
eda48c34 196 }
5a1e3cfc 197
cea5f9a2 198 cpu_single_env = env;
e4533c7a 199
c629a4bc 200 if (unlikely(exit_request)) {
1a28cac3 201 env->exit_request = 1;
1a28cac3
MT
202 }
203
ecb644f4 204#if defined(TARGET_I386)
6792a57b
JK
205 /* put eflags in CPU temporary format */
206 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
207 DF = 1 - (2 * ((env->eflags >> 10) & 1));
208 CC_OP = CC_OP_EFLAGS;
209 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
93ac68bc 210#elif defined(TARGET_SPARC)
e6e5906b
PB
211#elif defined(TARGET_M68K)
212 env->cc_op = CC_OP_FLAGS;
213 env->cc_dest = env->sr & 0xf;
214 env->cc_x = (env->sr >> 4) & 1;
ecb644f4
TS
215#elif defined(TARGET_ALPHA)
216#elif defined(TARGET_ARM)
d2fbca94 217#elif defined(TARGET_UNICORE32)
ecb644f4 218#elif defined(TARGET_PPC)
81ea0e13 219#elif defined(TARGET_LM32)
b779e29e 220#elif defined(TARGET_MICROBLAZE)
6af0bf9c 221#elif defined(TARGET_MIPS)
fdf9b3e8 222#elif defined(TARGET_SH4)
f1ccf904 223#elif defined(TARGET_CRIS)
10ec5117 224#elif defined(TARGET_S390X)
fdf9b3e8 225 /* XXXXX */
e4533c7a
FB
226#else
227#error unsupported target CPU
228#endif
3fb2ded1 229 env->exception_index = -1;
9d27abd9 230
7d13299d 231 /* prepare setjmp context for exception handling */
3fb2ded1
FB
232 for(;;) {
233 if (setjmp(env->jmp_env) == 0) {
234 /* if an exception is pending, we execute it here */
235 if (env->exception_index >= 0) {
236 if (env->exception_index >= EXCP_INTERRUPT) {
237 /* exit request from the cpu execution loop */
238 ret = env->exception_index;
1009d2ed
JK
239 if (ret == EXCP_DEBUG) {
240 cpu_handle_debug_exception(env);
241 }
3fb2ded1 242 break;
72d239ed
AJ
243 } else {
244#if defined(CONFIG_USER_ONLY)
3fb2ded1 245 /* if user mode only, we simulate a fake exception
9f083493 246 which will be handled outside the cpu execution
3fb2ded1 247 loop */
83479e77 248#if defined(TARGET_I386)
e694d4e2 249 do_interrupt(env);
83479e77 250#endif
3fb2ded1
FB
251 ret = env->exception_index;
252 break;
72d239ed 253#else
b5ff1b31 254 do_interrupt(env);
301d2908 255 env->exception_index = -1;
83479e77 256#endif
3fb2ded1 257 }
5fafdf24 258 }
9df217a3 259
b5fc09ae 260 next_tb = 0; /* force lookup of first TB */
3fb2ded1 261 for(;;) {
68a79315 262 interrupt_request = env->interrupt_request;
e1638bd8 263 if (unlikely(interrupt_request)) {
264 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
265 /* Mask out external interrupts for this step. */
3125f763 266 interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
e1638bd8 267 }
6658ffb8
PB
268 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
269 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
270 env->exception_index = EXCP_DEBUG;
1162c041 271 cpu_loop_exit(env);
6658ffb8 272 }
a90b7318 273#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
b779e29e 274 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
d2fbca94 275 defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
a90b7318
AZ
276 if (interrupt_request & CPU_INTERRUPT_HALT) {
277 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
278 env->halted = 1;
279 env->exception_index = EXCP_HLT;
1162c041 280 cpu_loop_exit(env);
a90b7318
AZ
281 }
282#endif
68a79315 283#if defined(TARGET_I386)
b09ea7d5 284 if (interrupt_request & CPU_INTERRUPT_INIT) {
e694d4e2 285 svm_check_intercept(env, SVM_EXIT_INIT);
b09ea7d5
GN
286 do_cpu_init(env);
287 env->exception_index = EXCP_HALTED;
1162c041 288 cpu_loop_exit(env);
b09ea7d5
GN
289 } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
290 do_cpu_sipi(env);
291 } else if (env->hflags2 & HF2_GIF_MASK) {
db620f46
FB
292 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
293 !(env->hflags & HF_SMM_MASK)) {
e694d4e2 294 svm_check_intercept(env, SVM_EXIT_SMI);
db620f46 295 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
e694d4e2 296 do_smm_enter(env);
db620f46
FB
297 next_tb = 0;
298 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
299 !(env->hflags2 & HF2_NMI_MASK)) {
300 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
301 env->hflags2 |= HF2_NMI_MASK;
e694d4e2 302 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
db620f46 303 next_tb = 0;
79c4f6b0
HY
304 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
305 env->interrupt_request &= ~CPU_INTERRUPT_MCE;
e694d4e2 306 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
79c4f6b0 307 next_tb = 0;
db620f46
FB
308 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
309 (((env->hflags2 & HF2_VINTR_MASK) &&
310 (env->hflags2 & HF2_HIF_MASK)) ||
311 (!(env->hflags2 & HF2_VINTR_MASK) &&
312 (env->eflags & IF_MASK &&
313 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
314 int intno;
e694d4e2 315 svm_check_intercept(env, SVM_EXIT_INTR);
db620f46
FB
316 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
317 intno = cpu_get_pic_interrupt(env);
93fcfe39 318 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
e694d4e2 319 do_interrupt_x86_hardirq(env, intno, 1);
db620f46
FB
320 /* ensure that no TB jump will be modified as
321 the program flow was changed */
322 next_tb = 0;
0573fbfc 323#if !defined(CONFIG_USER_ONLY)
db620f46
FB
324 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
325 (env->eflags & IF_MASK) &&
326 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
327 int intno;
328 /* FIXME: this should respect TPR */
e694d4e2 329 svm_check_intercept(env, SVM_EXIT_VINTR);
db620f46 330 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
93fcfe39 331 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
e694d4e2 332 do_interrupt_x86_hardirq(env, intno, 1);
d40c54d6 333 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
db620f46 334 next_tb = 0;
907a5b26 335#endif
db620f46 336 }
68a79315 337 }
ce09776b 338#elif defined(TARGET_PPC)
9fddaa0c
FB
339#if 0
340 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
d84bda46 341 cpu_reset(env);
9fddaa0c
FB
342 }
343#endif
47103572 344 if (interrupt_request & CPU_INTERRUPT_HARD) {
e9df014c
JM
345 ppc_hw_interrupt(env);
346 if (env->pending_interrupts == 0)
347 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
b5fc09ae 348 next_tb = 0;
ce09776b 349 }
81ea0e13
MW
350#elif defined(TARGET_LM32)
351 if ((interrupt_request & CPU_INTERRUPT_HARD)
352 && (env->ie & IE_IE)) {
353 env->exception_index = EXCP_IRQ;
354 do_interrupt(env);
355 next_tb = 0;
356 }
b779e29e
EI
357#elif defined(TARGET_MICROBLAZE)
358 if ((interrupt_request & CPU_INTERRUPT_HARD)
359 && (env->sregs[SR_MSR] & MSR_IE)
360 && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
361 && !(env->iflags & (D_FLAG | IMM_FLAG))) {
362 env->exception_index = EXCP_IRQ;
363 do_interrupt(env);
364 next_tb = 0;
365 }
6af0bf9c
FB
366#elif defined(TARGET_MIPS)
367 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
4cdc1cd1 368 cpu_mips_hw_interrupts_pending(env)) {
6af0bf9c
FB
369 /* Raise it */
370 env->exception_index = EXCP_EXT_INTERRUPT;
371 env->error_code = 0;
372 do_interrupt(env);
b5fc09ae 373 next_tb = 0;
6af0bf9c 374 }
e95c8d51 375#elif defined(TARGET_SPARC)
d532b26c
IK
376 if (interrupt_request & CPU_INTERRUPT_HARD) {
377 if (cpu_interrupts_enabled(env) &&
378 env->interrupt_index > 0) {
379 int pil = env->interrupt_index & 0xf;
380 int type = env->interrupt_index & 0xf0;
381
382 if (((type == TT_EXTINT) &&
383 cpu_pil_allowed(env, pil)) ||
384 type != TT_EXTINT) {
385 env->exception_index = env->interrupt_index;
386 do_interrupt(env);
387 next_tb = 0;
388 }
389 }
a90b7318 390 }
b5ff1b31
FB
391#elif defined(TARGET_ARM)
392 if (interrupt_request & CPU_INTERRUPT_FIQ
393 && !(env->uncached_cpsr & CPSR_F)) {
394 env->exception_index = EXCP_FIQ;
395 do_interrupt(env);
b5fc09ae 396 next_tb = 0;
b5ff1b31 397 }
9ee6e8bb
PB
398 /* ARMv7-M interrupt return works by loading a magic value
399 into the PC. On real hardware the load causes the
400 return to occur. The qemu implementation performs the
401 jump normally, then does the exception return when the
402 CPU tries to execute code at the magic address.
403 This will cause the magic PC value to be pushed to
a1c7273b 404 the stack if an interrupt occurred at the wrong time.
9ee6e8bb
PB
405 We avoid this by disabling interrupts when
406 pc contains a magic address. */
b5ff1b31 407 if (interrupt_request & CPU_INTERRUPT_HARD
9ee6e8bb
PB
408 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
409 || !(env->uncached_cpsr & CPSR_I))) {
b5ff1b31
FB
410 env->exception_index = EXCP_IRQ;
411 do_interrupt(env);
b5fc09ae 412 next_tb = 0;
b5ff1b31 413 }
d2fbca94
GX
414#elif defined(TARGET_UNICORE32)
415 if (interrupt_request & CPU_INTERRUPT_HARD
416 && !(env->uncached_asr & ASR_I)) {
417 do_interrupt(env);
418 next_tb = 0;
419 }
fdf9b3e8 420#elif defined(TARGET_SH4)
e96e2044
TS
421 if (interrupt_request & CPU_INTERRUPT_HARD) {
422 do_interrupt(env);
b5fc09ae 423 next_tb = 0;
e96e2044 424 }
eddf68a6 425#elif defined(TARGET_ALPHA)
6a80e088
RH
426 {
427 int idx = -1;
428 /* ??? This hard-codes the OSF/1 interrupt levels. */
429 switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
430 case 0 ... 3:
431 if (interrupt_request & CPU_INTERRUPT_HARD) {
432 idx = EXCP_DEV_INTERRUPT;
433 }
434 /* FALLTHRU */
435 case 4:
436 if (interrupt_request & CPU_INTERRUPT_TIMER) {
437 idx = EXCP_CLK_INTERRUPT;
438 }
439 /* FALLTHRU */
440 case 5:
441 if (interrupt_request & CPU_INTERRUPT_SMP) {
442 idx = EXCP_SMP_INTERRUPT;
443 }
444 /* FALLTHRU */
445 case 6:
446 if (interrupt_request & CPU_INTERRUPT_MCHK) {
447 idx = EXCP_MCHK;
448 }
449 }
450 if (idx >= 0) {
451 env->exception_index = idx;
452 env->error_code = 0;
453 do_interrupt(env);
454 next_tb = 0;
455 }
eddf68a6 456 }
f1ccf904 457#elif defined(TARGET_CRIS)
1b1a38b0 458 if (interrupt_request & CPU_INTERRUPT_HARD
fb9fb692
EI
459 && (env->pregs[PR_CCS] & I_FLAG)
460 && !env->locked_irq) {
1b1a38b0
EI
461 env->exception_index = EXCP_IRQ;
462 do_interrupt(env);
463 next_tb = 0;
464 }
465 if (interrupt_request & CPU_INTERRUPT_NMI
466 && (env->pregs[PR_CCS] & M_FLAG)) {
467 env->exception_index = EXCP_NMI;
f1ccf904 468 do_interrupt(env);
b5fc09ae 469 next_tb = 0;
f1ccf904 470 }
0633879f
PB
471#elif defined(TARGET_M68K)
472 if (interrupt_request & CPU_INTERRUPT_HARD
473 && ((env->sr & SR_I) >> SR_I_SHIFT)
474 < env->pending_level) {
475 /* Real hardware gets the interrupt vector via an
476 IACK cycle at this point. Current emulated
477 hardware doesn't rely on this, so we
478 provide/save the vector when the interrupt is
479 first signalled. */
480 env->exception_index = env->pending_vector;
3c688828 481 do_interrupt_m68k_hardirq(env);
b5fc09ae 482 next_tb = 0;
0633879f 483 }
3110e292
AG
484#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
485 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
486 (env->psw.mask & PSW_MASK_EXT)) {
487 do_interrupt(env);
488 next_tb = 0;
489 }
68a79315 490#endif
ff2712ba 491 /* Don't use the cached interrupt_request value,
9d05095e 492 do_interrupt may have updated the EXITTB flag. */
b5ff1b31 493 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
bf3e8bf1
FB
494 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
495 /* ensure that no TB jump will be modified as
496 the program flow was changed */
b5fc09ae 497 next_tb = 0;
bf3e8bf1 498 }
be214e6c
AJ
499 }
500 if (unlikely(env->exit_request)) {
501 env->exit_request = 0;
502 env->exception_index = EXCP_INTERRUPT;
1162c041 503 cpu_loop_exit(env);
3fb2ded1 504 }
a73b1fd9 505#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
8fec2b8c 506 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
3fb2ded1 507 /* restore flags in standard format */
ecb644f4 508#if defined(TARGET_I386)
e694d4e2
BS
509 env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
510 | (DF & DF_MASK);
93fcfe39 511 log_cpu_state(env, X86_DUMP_CCOP);
3fb2ded1 512 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
e6e5906b
PB
513#elif defined(TARGET_M68K)
514 cpu_m68k_flush_flags(env, env->cc_op);
515 env->cc_op = CC_OP_FLAGS;
516 env->sr = (env->sr & 0xffe0)
517 | env->cc_dest | (env->cc_x << 4);
93fcfe39 518 log_cpu_state(env, 0);
e4533c7a 519#else
a73b1fd9 520 log_cpu_state(env, 0);
e4533c7a 521#endif
3fb2ded1 522 }
a73b1fd9 523#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
d5975363 524 spin_lock(&tb_lock);
cea5f9a2 525 tb = tb_find_fast(env);
d5975363
PB
526 /* Note: we do it here to avoid a gcc bug on Mac OS X when
527 doing it in tb_find_slow */
528 if (tb_invalidated_flag) {
529 /* as some TB could have been invalidated because
530 of memory exceptions while generating the code, we
531 must recompute the hash index here */
532 next_tb = 0;
2e70f6ef 533 tb_invalidated_flag = 0;
d5975363 534 }
f0667e66 535#ifdef CONFIG_DEBUG_EXEC
93fcfe39
AL
536 qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
537 (long)tb->tc_ptr, tb->pc,
538 lookup_symbol(tb->pc));
9d27abd9 539#endif
8a40a180
FB
540 /* see if we can patch the calling TB. When the TB
541 spans two pages, we cannot safely do a direct
542 jump. */
040f2fb2 543 if (next_tb != 0 && tb->page_addr[1] == -1) {
b5fc09ae 544 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
3fb2ded1 545 }
d5975363 546 spin_unlock(&tb_lock);
55e8b85e 547
548 /* cpu_interrupt might be called while translating the
549 TB, but before it is linked into a potentially
550 infinite loop and becomes env->current_tb. Avoid
551 starting execution if there is a pending interrupt. */
b0052d15
JK
552 env->current_tb = tb;
553 barrier();
554 if (likely(!env->exit_request)) {
2e70f6ef 555 tc_ptr = tb->tc_ptr;
3fb2ded1 556 /* execute the generated code */
cea5f9a2 557 next_tb = tcg_qemu_tb_exec(env, tc_ptr);
2e70f6ef 558 if ((next_tb & 3) == 2) {
bf20dc07 559 /* Instruction counter expired. */
2e70f6ef
PB
560 int insns_left;
561 tb = (TranslationBlock *)(long)(next_tb & ~3);
562 /* Restore PC. */
622ed360 563 cpu_pc_from_tb(env, tb);
2e70f6ef
PB
564 insns_left = env->icount_decr.u32;
565 if (env->icount_extra && insns_left >= 0) {
566 /* Refill decrementer and continue execution. */
567 env->icount_extra += insns_left;
568 if (env->icount_extra > 0xffff) {
569 insns_left = 0xffff;
570 } else {
571 insns_left = env->icount_extra;
572 }
573 env->icount_extra -= insns_left;
574 env->icount_decr.u16.low = insns_left;
575 } else {
576 if (insns_left > 0) {
577 /* Execute remaining instructions. */
cea5f9a2 578 cpu_exec_nocache(env, insns_left, tb);
2e70f6ef
PB
579 }
580 env->exception_index = EXCP_INTERRUPT;
581 next_tb = 0;
1162c041 582 cpu_loop_exit(env);
2e70f6ef
PB
583 }
584 }
585 }
b0052d15 586 env->current_tb = NULL;
4cbf74b6
FB
587 /* reset soft MMU for next block (it can currently
588 only be set by a memory fault) */
50a518e3 589 } /* for(;;) */
0d101938
JK
590 } else {
591 /* Reload env after longjmp - the compiler may have smashed all
592 * local variables as longjmp is marked 'noreturn'. */
593 env = cpu_single_env;
7d13299d 594 }
3fb2ded1
FB
595 } /* for(;;) */
596
7d13299d 597
e4533c7a 598#if defined(TARGET_I386)
9de5e440 599 /* restore flags in standard format */
e694d4e2
BS
600 env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
601 | (DF & DF_MASK);
e4533c7a 602#elif defined(TARGET_ARM)
b7bcbe95 603 /* XXX: Save/restore host fpu exception state?. */
d2fbca94 604#elif defined(TARGET_UNICORE32)
93ac68bc 605#elif defined(TARGET_SPARC)
67867308 606#elif defined(TARGET_PPC)
81ea0e13 607#elif defined(TARGET_LM32)
e6e5906b
PB
608#elif defined(TARGET_M68K)
609 cpu_m68k_flush_flags(env, env->cc_op);
610 env->cc_op = CC_OP_FLAGS;
611 env->sr = (env->sr & 0xffe0)
612 | env->cc_dest | (env->cc_x << 4);
b779e29e 613#elif defined(TARGET_MICROBLAZE)
6af0bf9c 614#elif defined(TARGET_MIPS)
fdf9b3e8 615#elif defined(TARGET_SH4)
eddf68a6 616#elif defined(TARGET_ALPHA)
f1ccf904 617#elif defined(TARGET_CRIS)
10ec5117 618#elif defined(TARGET_S390X)
fdf9b3e8 619 /* XXXXX */
e4533c7a
FB
620#else
621#error unsupported target CPU
622#endif
1057eaa7 623
6a00d601 624 /* fail safe : never use cpu_single_env outside cpu_exec() */
5fafdf24 625 cpu_single_env = NULL;
7d13299d
FB
626 return ret;
627}