]> git.proxmox.com Git - qemu.git/blame - cpu-exec.c
like acpi_table_install(), acpi_table_add() should propagate Errors
[qemu.git] / cpu-exec.c
CommitLineData
7d13299d 1/*
e965fc38 2 * emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0 16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
7d13299d 18 */
e4533c7a 19#include "config.h"
cea5f9a2 20#include "cpu.h"
76cad711 21#include "disas/disas.h"
7cb69cae 22#include "tcg.h"
1de7afc9 23#include "qemu/atomic.h"
9c17d615 24#include "sysemu/qtest.h"
7d13299d 25
f0667e66 26//#define CONFIG_DEBUG_EXEC
7d13299d 27
3993c6bd 28bool qemu_cpu_has_work(CPUState *cpu)
6a4955a8 29{
3993c6bd 30 return cpu_has_work(cpu);
6a4955a8
AL
31}
32
9349b4f9 33void cpu_loop_exit(CPUArchState *env)
e4533c7a 34{
d77953b9
AF
35 CPUState *cpu = ENV_GET_CPU(env);
36
37 cpu->current_tb = NULL;
6ab7e546 38 siglongjmp(env->jmp_env, 1);
e4533c7a 39}
bfed01fc 40
/* Exit the current TB from a signal handler.  The host registers are
   restored in a state compatible with the CPU emulator. */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    /* No exception is pending when we resume at the top of the loop. */
    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
fbf9eeb3 53
77211379
PM
54/* Execute a TB, and fix up the CPU state afterwards if necessary */
55static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
56{
57 CPUArchState *env = cpu->env_ptr;
58 tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
59 if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
60 /* We didn't start executing this TB (eg because the instruction
61 * counter hit zero); we must restore the guest PC to the address
62 * of the start of the TB.
63 */
64 TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
65 cpu_pc_from_tb(env, tb);
66 }
378df4b2
PM
67 if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
68 /* We were asked to stop executing TBs (probably a pending
69 * interrupt. We've now stopped, so clear the flag.
70 */
71 cpu->tcg_exit_req = 0;
72 }
77211379
PM
73 return next_tb;
74}
75
2e70f6ef
PB
76/* Execute the code without caching the generated code. An interpreter
77 could be used if available. */
9349b4f9 78static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
cea5f9a2 79 TranslationBlock *orig_tb)
2e70f6ef 80{
d77953b9 81 CPUState *cpu = ENV_GET_CPU(env);
2e70f6ef
PB
82 TranslationBlock *tb;
83
84 /* Should never happen.
85 We only end up here when an existing TB is too long. */
86 if (max_cycles > CF_COUNT_MASK)
87 max_cycles = CF_COUNT_MASK;
88
89 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
90 max_cycles);
d77953b9 91 cpu->current_tb = tb;
2e70f6ef 92 /* execute the generated code */
77211379 93 cpu_tb_exec(cpu, tb->tc_ptr);
d77953b9 94 cpu->current_tb = NULL;
2e70f6ef
PB
95 tb_phys_invalidate(tb, -1);
96 tb_free(tb);
97}
98
9349b4f9 99static TranslationBlock *tb_find_slow(CPUArchState *env,
cea5f9a2 100 target_ulong pc,
8a40a180 101 target_ulong cs_base,
c068688b 102 uint64_t flags)
8a40a180
FB
103{
104 TranslationBlock *tb, **ptb1;
8a40a180 105 unsigned int h;
337fc758 106 tb_page_addr_t phys_pc, phys_page1;
41c1b1c9 107 target_ulong virt_page2;
3b46e624 108
5e5f07e0 109 tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
3b46e624 110
8a40a180 111 /* find translated block using physical mappings */
41c1b1c9 112 phys_pc = get_page_addr_code(env, pc);
8a40a180 113 phys_page1 = phys_pc & TARGET_PAGE_MASK;
8a40a180 114 h = tb_phys_hash_func(phys_pc);
5e5f07e0 115 ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
8a40a180
FB
116 for(;;) {
117 tb = *ptb1;
118 if (!tb)
119 goto not_found;
5fafdf24 120 if (tb->pc == pc &&
8a40a180 121 tb->page_addr[0] == phys_page1 &&
5fafdf24 122 tb->cs_base == cs_base &&
8a40a180
FB
123 tb->flags == flags) {
124 /* check next page if needed */
125 if (tb->page_addr[1] != -1) {
337fc758
BS
126 tb_page_addr_t phys_page2;
127
5fafdf24 128 virt_page2 = (pc & TARGET_PAGE_MASK) +
8a40a180 129 TARGET_PAGE_SIZE;
41c1b1c9 130 phys_page2 = get_page_addr_code(env, virt_page2);
8a40a180
FB
131 if (tb->page_addr[1] == phys_page2)
132 goto found;
133 } else {
134 goto found;
135 }
136 }
137 ptb1 = &tb->phys_hash_next;
138 }
139 not_found:
2e70f6ef
PB
140 /* if no translated code available, then translate it now */
141 tb = tb_gen_code(env, pc, cs_base, flags, 0);
3b46e624 142
8a40a180 143 found:
2c90fe2b
KB
144 /* Move the last found TB to the head of the list */
145 if (likely(*ptb1)) {
146 *ptb1 = tb->phys_hash_next;
5e5f07e0
EV
147 tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
148 tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
2c90fe2b 149 }
8a40a180
FB
150 /* we add the TB in the virtual pc hash table */
151 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
8a40a180
FB
152 return tb;
153}
154
9349b4f9 155static inline TranslationBlock *tb_find_fast(CPUArchState *env)
8a40a180
FB
156{
157 TranslationBlock *tb;
158 target_ulong cs_base, pc;
6b917547 159 int flags;
8a40a180
FB
160
161 /* we record a subset of the CPU state. It will
162 always be the same before a given translated block
163 is executed. */
6b917547 164 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
bce61846 165 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
166 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
167 tb->flags != flags)) {
cea5f9a2 168 tb = tb_find_slow(env, pc, cs_base, flags);
8a40a180
FB
169 }
170 return tb;
171}
172
1009d2ed
JK
173static CPUDebugExcpHandler *debug_excp_handler;
174
84e3b602 175void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
1009d2ed 176{
1009d2ed 177 debug_excp_handler = handler;
1009d2ed
JK
178}
179
9349b4f9 180static void cpu_handle_debug_exception(CPUArchState *env)
1009d2ed
JK
181{
182 CPUWatchpoint *wp;
183
184 if (!env->watchpoint_hit) {
185 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
186 wp->flags &= ~BP_WATCHPOINT_HIT;
187 }
188 }
189 if (debug_excp_handler) {
190 debug_excp_handler(env);
191 }
192}
193
/* main execution loop */

/* Global flag set (e.g. from signal context or another thread) to ask
 * every CPU to break out of its execution loop; sig_atomic_t keeps the
 * access async-signal-safe. */
volatile sig_atomic_t exit_request;
/* Main CPU execution loop: run translated blocks for @env until an
 * exception or exit request stops execution.  Returns the exception
 * index (e.g. EXCP_HALTED, EXCP_INTERRUPT, EXCP_DEBUG) that ended the
 * run.  Exceptional exits from within TB execution come back here via
 * sigsetjmp/siglongjmp (see cpu_loop_exit()). */
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tcg_target_ulong next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            /* Halted and nothing to do: stay halted. */
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    cpu_single_env = env;

    /* Latch a pending global exit request into this CPU. */
    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(env);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels. */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(env, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            env = cpu_single_env;
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?. */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}