]> git.proxmox.com Git - mirror_qemu.git/blame - cpu-exec.c
cpu: Pass CPUState to cpu_interrupt()
[mirror_qemu.git] / cpu-exec.c
CommitLineData
7d13299d 1/*
e965fc38 2 * emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0 16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
7d13299d 18 */
e4533c7a 19#include "config.h"
cea5f9a2 20#include "cpu.h"
76cad711 21#include "disas/disas.h"
7cb69cae 22#include "tcg.h"
1de7afc9 23#include "qemu/atomic.h"
9c17d615 24#include "sysemu/qtest.h"
7d13299d 25
f0667e66 26//#define CONFIG_DEBUG_EXEC
7d13299d 27
/* Public wrapper: return true if @cpu has pending work (interrupts etc.),
 * as reported by the per-target cpu_has_work() hook. */
bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}
/* Abort execution of the current TB and unwind back to the sigsetjmp()
 * point at the top of cpu_exec().  current_tb is cleared first so no one
 * observes a stale TB pointer after the non-local jump. */
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}
bfed01fc 40
fbf9eeb3
FB
41/* exit the current TB from a signal handler. The host registers are
42 restored in a state compatible with the CPU emulator
43 */
/* Re-enter the main loop from a signal handler without raising a guest
 * exception: exception_index is cleared before the long jump so cpu_exec()
 * goes straight back to TB lookup. */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
fbf9eeb3 53
77211379
PM
54/* Execute a TB, and fix up the CPU state afterwards if necessary */
55static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
56{
57 CPUArchState *env = cpu->env_ptr;
58 tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
59 if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
60 /* We didn't start executing this TB (eg because the instruction
61 * counter hit zero); we must restore the guest PC to the address
62 * of the start of the TB.
63 */
64 TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
65 cpu_pc_from_tb(env, tb);
66 }
378df4b2
PM
67 if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
68 /* We were asked to stop executing TBs (probably a pending
69 * interrupt. We've now stopped, so clear the flag.
70 */
71 cpu->tcg_exit_req = 0;
72 }
77211379
PM
73 return next_tb;
74}
75
2e70f6ef
PB
76/* Execute the code without caching the generated code. An interpreter
77 could be used if available. */
9349b4f9 78static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
cea5f9a2 79 TranslationBlock *orig_tb)
2e70f6ef 80{
d77953b9 81 CPUState *cpu = ENV_GET_CPU(env);
2e70f6ef
PB
82 TranslationBlock *tb;
83
84 /* Should never happen.
85 We only end up here when an existing TB is too long. */
86 if (max_cycles > CF_COUNT_MASK)
87 max_cycles = CF_COUNT_MASK;
88
89 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
90 max_cycles);
d77953b9 91 cpu->current_tb = tb;
2e70f6ef 92 /* execute the generated code */
77211379 93 cpu_tb_exec(cpu, tb->tc_ptr);
d77953b9 94 cpu->current_tb = NULL;
2e70f6ef
PB
95 tb_phys_invalidate(tb, -1);
96 tb_free(tb);
97}
98
/* Slow-path TB lookup: walk the physical-PC hash chain for a TB matching
 * (pc, cs_base, flags), translating a new one if none exists.  The hit is
 * moved to the head of its chain (MRU) and installed in the per-CPU
 * virtual-PC jump cache before returning. */
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed: a TB spanning two pages only
               matches when the second physical page also agrees */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list; *ptb1 is non-NULL
       exactly when the TB came from the chain rather than tb_gen_code() */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
154
9349b4f9 155static inline TranslationBlock *tb_find_fast(CPUArchState *env)
8a40a180
FB
156{
157 TranslationBlock *tb;
158 target_ulong cs_base, pc;
6b917547 159 int flags;
8a40a180
FB
160
161 /* we record a subset of the CPU state. It will
162 always be the same before a given translated block
163 is executed. */
6b917547 164 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
bce61846 165 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
166 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
167 tb->flags != flags)) {
cea5f9a2 168 tb = tb_find_slow(env, pc, cs_base, flags);
8a40a180
FB
169 }
170 return tb;
171}
172
1009d2ed
JK
/* Hook invoked from cpu_handle_debug_exception(); NULL when unset. */
static CPUDebugExcpHandler *debug_excp_handler;

/* Install @handler as the global debug-exception callback.  Any
 * previously registered handler is silently replaced. */
void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
179
/* Post-process an EXCP_DEBUG exit from the main loop: when no watchpoint
 * actually fired (a breakpoint/single-step stop), clear any stale
 * BP_WATCHPOINT_HIT flags, then invoke the registered debug hook if any. */
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
193
7d13299d
FB
194/* main execution loop */
195
1a28cac3
MT
196volatile sig_atomic_t exit_request;
197
/* Main execution loop: run translated blocks for @env until an exception
 * or exit request breaks out.  Structure: an outer for(;;) whose body is
 * protected by sigsetjmp() (cpu_loop_exit() longjmps back here), and an
 * inner for(;;) that services pending interrupts, finds/chains TBs and
 * executes them.  Returns the final exception_index (e.g. EXCP_INTERRUPT,
 * EXCP_HLT, EXCP_DEBUG). */
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    cpu_single_env = env;

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt(env);
#endif
                    ret = env->exception_index;
                    break;
#else
                    do_interrupt(env);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                          0);
                            do_cpu_init(x86_env_get_cpu(env));
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        /* Interrupts only taken while the guest interrupt
                           flag (GIF) is set; SMI > NMI > MCE > INTR. */
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            do_interrupt(env);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                   /* Don't use the cached interrupt_request value,
                      do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                        | (DF & DF_MASK);
                    log_cpu_state(env, CPU_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}