/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "qemu-barrier.h"

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

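/* Abort execution of the current TB and longjmp() back to the setjmp()
   in cpu_exec(); callers normally set env->exception_index first so the
   main loop knows what to dispatch. */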
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
    env = env1;

    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

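/* tcg_qemu_tb_exec() returns the address of the last executed TB with a
   status code in its two low bits; a value of 2 there means the block
   was interrupted (e.g. the instruction counter expired) and the guest
   PC must be restored from the TB before continuing. */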
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

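/* Look up a TB in the global hash table, which is keyed on the physical
   address of the guest code so that translations can be invalidated when
   the underlying physical page is modified; a block spanning two pages
   must also match on the second physical page. */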
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

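/* Fast path for the common case: a direct-mapped cache (tb_jmp_cache)
   indexed by a hash of the virtual PC. If the cached entry does not
   match the current pc/cs_base/flags, fall back to the physical-address
   lookup in tb_find_slow(). */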
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

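/* cpu_exec() is built from two nested loops: the outer loop re-enters
   through setjmp() whenever translated code or a helper longjmp()s out
   (exceptions, exit requests), while the inner loop services pending
   interrupts, finds or translates the next TB, chains it to the previous
   one when possible, and executes it. */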
int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

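    /* Convert guest flag state into the lazy per-target format expected
       by the translated code (e.g. x86 eflags is split into CC_SRC /
       CC_OP); the reverse conversion is done on exit below. */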
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

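    /* Anything that needs to leave the translated code (guest exceptions,
       debug events, exit requests) calls cpu_loop_exit(), which
       longjmp()s back to the setjmp() below; env->exception_index is then
       examined at the top of the outer loop. */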
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_UNICORE32)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_S390X)
                    do_interrupt(env);
#endif
                    env->exception_index = -1;
#endif
                }
            }

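            /* next_tb remembers the previously executed TB (with the
               outgoing jump slot in its two low bits) so the next TB can
               be chained to it; it is reset to 0 whenever chaining would
               be unsafe because the program flow changed. */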
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
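                    /* Target-specific hardware interrupt delivery: each
                       guest architecture checks its own enable/mask bits
                       and, if an interrupt can be taken now, injects it
                       via do_interrupt() and clears next_tb so that no
                       TB chaining survives the change in control flow. */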
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
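                    /* A status of 2 in the low bits means the TB stopped
                       because the instruction-count budget ran out; restore
                       the PC from the TB, then either refill the icount
                       decrementer or run the remaining instructions without
                       caching the generated code. */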
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}