/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

//#define CONFIG_DEBUG_EXEC

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

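/* Unwind back to the setjmp() in cpu_exec(): clear current_tb so the main
   loop knows no translated block is executing, then longjmp to the jmp_env
   buffer that cpu_exec() prepared. */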
void cpu_loop_exit(CPUArchState *env)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
#endif

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    tcg_target_ulong next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(env, tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

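/* TB lookup is two-level: tb_find_fast() below first checks the per-CPU
   tb_jmp_cache indexed by virtual PC; on a miss it falls back to
   tb_find_slow(), which hashes the physical PC into
   tcg_ctx.tb_ctx.tb_phys_hash and, if no block matches (pc, cs_base, flags),
   translates a new one with tb_gen_code(). */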
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

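/* Optional hook run by cpu_handle_debug_exception() when an EXCP_DEBUG
   exception reaches the main loop; installed with
   cpu_set_debug_excp_handler(). */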
static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (env->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        env->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

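    /* Bring target flag state into the temporary format that the translated
       code works with: i386 keeps condition codes lazily in CC_SRC/CC_OP,
       m68k in cc_op/cc_dest/cc_x.  The conversion back to the architectural
       format happens on exit from cpu_exec() (and in the logging path). */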
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

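    /* Two nested loops follow: the outer one re-enters through setjmp() each
       time an exception or exit request longjmps out of translated code via
       cpu_loop_exit(); the inner one services pending interrupts, finds the
       next TB and executes it, chaining TBs directly together when safe. */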
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
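            /* next_tb is the value returned by tcg_qemu_tb_exec(): the
               address of the previously executed TB with the taken goto_tb
               slot index in its low bits (2 marks an exit taken at the head
               of the TB; see the icount handling below).  Resetting it to 0
               keeps tb_add_jump() from chaining the next block to a stale
               predecessor. */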
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
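                    /* Deliver pending interrupts for the target.  Each
                       architecture checks its own enable bits before calling
                       do_interrupt(); whenever the program flow is
                       redirected, next_tb is reset to 0 so the interrupted
                       TB is not chained to the handler's code. */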
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
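                /* Optionally dump the CPU state before each TB; only
                   compiled in when DEBUG_DISAS or CONFIG_DEBUG_EXEC is
                   defined and only active with the CPU_LOG_TB_CPU log
                   mask. */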
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, CPU_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
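                /* Look up (or translate) the next TB under tb_lock, which
                   also covers the direct-jump patching done by tb_add_jump()
                   below. */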
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */

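    /* Execution left the loops through one of the break statements above;
       convert the lazily maintained condition-code state back to the
       architectural format before returning ret to the caller. */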
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}