]> git.proxmox.com Git - qemu.git/blame - cpu-exec.c
qmp: fix handling of cmd with Equals in qmp-shell
[qemu.git] / cpu-exec.c
CommitLineData
7d13299d 1/*
e965fc38 2 * emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0 16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
7d13299d 18 */
e4533c7a 19#include "config.h"
cea5f9a2 20#include "cpu.h"
76cad711 21#include "disas/disas.h"
7cb69cae 22#include "tcg.h"
1de7afc9 23#include "qemu/atomic.h"
9c17d615 24#include "sysemu/qtest.h"
7d13299d 25
/* Return true if @cpu has pending work (e.g. an interrupt) to service.
 * Public wrapper around the per-target cpu_has_work() hook. */
bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}
30
/* Abandon the currently executing TB and unwind back to the
 * sigsetjmp() point in cpu_exec().  env->exception_index must already
 * describe why we are leaving the loop (or be -1). */
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    /* We are leaving the TB mid-flight; clear current_tb so nothing
     * treats it as still running before the longjmp lands. */
    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}
bfed01fc 38
fbf9eeb3
FB
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    /* No exception to deliver: clear any pending index and longjmp back
     * to the main loop, which will simply look up the next TB.
     * NOTE(review): @puc is currently unused in the softmmu variant. */
    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
fbf9eeb3 51
77211379
PM
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    /* next_tb packs the last TB pointer in the high bits and a
     * TB_EXIT_* reason code in the low TB_EXIT_MASK bits. */
    tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        cpu_pc_from_tb(env, tb);
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
73
2e70f6ef
PB
74/* Execute the code without caching the generated code. An interpreter
75 could be used if available. */
9349b4f9 76static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
cea5f9a2 77 TranslationBlock *orig_tb)
2e70f6ef 78{
d77953b9 79 CPUState *cpu = ENV_GET_CPU(env);
2e70f6ef
PB
80 TranslationBlock *tb;
81
82 /* Should never happen.
83 We only end up here when an existing TB is too long. */
84 if (max_cycles > CF_COUNT_MASK)
85 max_cycles = CF_COUNT_MASK;
86
87 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
88 max_cycles);
d77953b9 89 cpu->current_tb = tb;
2e70f6ef 90 /* execute the generated code */
77211379 91 cpu_tb_exec(cpu, tb->tc_ptr);
d77953b9 92 cpu->current_tb = NULL;
2e70f6ef
PB
93 tb_phys_invalidate(tb, -1);
94 tb_free(tb);
95}
96
/* Slow-path TB lookup: walk the physical-PC hash chain for a block
 * matching (pc, cs_base, flags), generating a new translation if none
 * is found.  On success the TB is moved to the head of its hash chain
 * (MRU) and installed in the virtual-PC jump cache.  Called by
 * tb_find_fast() on a jump-cache miss, with tb_lock held. */
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed: a TB spanning two guest pages
             * only matches if the second physical page matches too */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list.  *ptb1 is NULL
     * exactly when the TB was freshly generated above, in which case
     * tb_gen_code() has already linked it into the chain. */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
152
9349b4f9 153static inline TranslationBlock *tb_find_fast(CPUArchState *env)
8a40a180
FB
154{
155 TranslationBlock *tb;
156 target_ulong cs_base, pc;
6b917547 157 int flags;
8a40a180
FB
158
159 /* we record a subset of the CPU state. It will
160 always be the same before a given translated block
161 is executed. */
6b917547 162 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
bce61846 163 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
164 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
165 tb->flags != flags)) {
cea5f9a2 166 tb = tb_find_slow(env, pc, cs_base, flags);
8a40a180
FB
167 }
168 return tb;
169}
170
1009d2ed
JK
/* Optional hook invoked when the execution loop exits with EXCP_DEBUG;
 * NULL when no debugger/gdbstub handler has been installed. */
static CPUDebugExcpHandler *debug_excp_handler;

/* Install @handler as the debug-exception hook, replacing any
 * previously registered handler. */
void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
177
9349b4f9 178static void cpu_handle_debug_exception(CPUArchState *env)
1009d2ed
JK
179{
180 CPUWatchpoint *wp;
181
182 if (!env->watchpoint_hit) {
183 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
184 wp->flags &= ~BP_WATCHPOINT_HIT;
185 }
186 }
187 if (debug_excp_handler) {
188 debug_excp_handler(env);
189 }
190}
191
7d13299d
FB
/* main execution loop */

/* Set by other threads to ask every vCPU to leave its execution loop;
 * latched into the per-CPU cpu->exit_request at the top of cpu_exec(). */
volatile sig_atomic_t exit_request;

/* Main TCG execution loop: repeatedly look up (translating on demand),
 * chain and run translation blocks for @env, servicing pending
 * interrupts and exceptions between blocks.  Returns the EXCP_* code
 * that caused the loop to exit (e.g. EXCP_INTERRUPT, EXCP_DEBUG,
 * EXCP_HALTED). */
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    /* A halted CPU only resumes when it has work (e.g. a pending
     * interrupt); otherwise report EXCP_HALTED immediately. */
    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    cpu_single_env = env;

    /* As long as cpu_single_env is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the cpu_single_env
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

    /* Per-target entry fixup: convert guest flags/registers into the
     * lazy/temporary representation used while TCG code runs. */
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
                    /* Per-target hardware interrupt delivery.  Each arm
                     * clears next_tb so the interrupted flow is never
                     * direct-chained to the next executed TB. */
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                          0);
                            do_cpu_init(x86_env_get_cpu(env));
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(env, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */


    /* Per-target exit fixup: fold the lazy flag representation back
     * into the architectural registers before returning. */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}