/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

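/* Unwind back to the setjmp() armed in cpu_exec(); the non-zero longjmp
   value distinguishes this return path from the initial setjmp() setup. */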
void cpu_loop_exit(CPUState *env1)
{
    env1->current_tb = NULL;
    longjmp(env1->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
    env = env1;

    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

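/* TB lookup is two-level: tb_find_fast() first probes the per-CPU
   tb_jmp_cache, which is indexed by the virtual PC, and falls back to
   tb_find_slow() below, which walks the tb_phys_hash chain keyed on the
   physical PC and, if nothing matches, translates a new block with
   tb_gen_code(). */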
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

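    /* Each iteration of the outer loop below re-arms the setjmp() target:
       cpu_loop_exit() and raised exceptions longjmp() back to it, and any
       pending exception is serviced before the TB execution loop is
       re-entered. */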
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

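            /* Inner loop: service any pending interrupt request, look up or
               translate the TB for the current PC, optionally chain it to
               the previously executed TB, then run it. */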
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
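                    /* Target-specific interrupt delivery: each architecture
                       checks its own interrupt sources below, and any taken
                       interrupt clears next_tb so the previous TB is not
                       chained to code that no longer follows it. */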
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(env, SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(env, SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(env, SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(env, SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
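                /* When non-zero, next_tb is the address of the previously
                   executed TB, with the index of the jump slot it exited
                   through encoded in its low two bits; tb_add_jump() below
                   uses that slot to patch a direct jump to the new TB.
                   A zero next_tb suppresses chaining. */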
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
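                    /* tcg_qemu_tb_exec() returns the pointer of the last TB
                       that was executed, tagged in its low two bits; a tag
                       of 2 indicates the block stopped because the icount
                       instruction budget ran out. */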
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}