/*
 * cpu-exec.c — QEMU main execution loop.
 * (Recovered from a git.proxmox.com mirror_qemu "git blame" page dump.)
 */
7d13299d 1/*
e965fc38 2 * emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0 16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
7d13299d 18 */
e4533c7a 19#include "config.h"
cea5f9a2 20#include "cpu.h"
76cad711 21#include "disas/disas.h"
7cb69cae 22#include "tcg.h"
1de7afc9 23#include "qemu/atomic.h"
9c17d615 24#include "sysemu/qtest.h"
7d13299d 25
3993c6bd 26bool qemu_cpu_has_work(CPUState *cpu)
6a4955a8 27{
3993c6bd 28 return cpu_has_work(cpu);
6a4955a8
AL
29}
30
9349b4f9 31void cpu_loop_exit(CPUArchState *env)
e4533c7a 32{
d77953b9
AF
33 CPUState *cpu = ENV_GET_CPU(env);
34
35 cpu->current_tb = NULL;
6ab7e546 36 siglongjmp(env->jmp_env, 1);
e4533c7a 37}
bfed01fc 38
fbf9eeb3
FB
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    /* No guest exception is pending; just re-enter the main loop. */
    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif
fbf9eeb3 51
77211379
PM
52/* Execute a TB, and fix up the CPU state afterwards if necessary */
53static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
54{
55 CPUArchState *env = cpu->env_ptr;
03afa5f8
RH
56 uintptr_t next_tb;
57
58#if defined(DEBUG_DISAS)
59 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
60#if defined(TARGET_I386)
61 log_cpu_state(cpu, CPU_DUMP_CCOP);
62#elif defined(TARGET_M68K)
63 /* ??? Should not modify env state for dumping. */
64 cpu_m68k_flush_flags(env, env->cc_op);
65 env->cc_op = CC_OP_FLAGS;
66 env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
67 log_cpu_state(cpu, 0);
68#else
69 log_cpu_state(cpu, 0);
70#endif
71 }
72#endif /* DEBUG_DISAS */
73
74 next_tb = tcg_qemu_tb_exec(env, tb_ptr);
77211379
PM
75 if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
76 /* We didn't start executing this TB (eg because the instruction
77 * counter hit zero); we must restore the guest PC to the address
78 * of the start of the TB.
79 */
bdf7ae5b 80 CPUClass *cc = CPU_GET_CLASS(cpu);
77211379 81 TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
bdf7ae5b
AF
82 if (cc->synchronize_from_tb) {
83 cc->synchronize_from_tb(cpu, tb);
84 } else {
85 assert(cc->set_pc);
86 cc->set_pc(cpu, tb->pc);
87 }
77211379 88 }
378df4b2
PM
89 if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
90 /* We were asked to stop executing TBs (probably a pending
91 * interrupt. We've now stopped, so clear the flag.
92 */
93 cpu->tcg_exit_req = 0;
94 }
77211379
PM
95 return next_tb;
96}
97
2e70f6ef
PB
98/* Execute the code without caching the generated code. An interpreter
99 could be used if available. */
9349b4f9 100static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
cea5f9a2 101 TranslationBlock *orig_tb)
2e70f6ef 102{
d77953b9 103 CPUState *cpu = ENV_GET_CPU(env);
2e70f6ef
PB
104 TranslationBlock *tb;
105
106 /* Should never happen.
107 We only end up here when an existing TB is too long. */
108 if (max_cycles > CF_COUNT_MASK)
109 max_cycles = CF_COUNT_MASK;
110
111 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
112 max_cycles);
d77953b9 113 cpu->current_tb = tb;
2e70f6ef 114 /* execute the generated code */
77211379 115 cpu_tb_exec(cpu, tb->tc_ptr);
d77953b9 116 cpu->current_tb = NULL;
2e70f6ef
PB
117 tb_phys_invalidate(tb, -1);
118 tb_free(tb);
119}
120
9349b4f9 121static TranslationBlock *tb_find_slow(CPUArchState *env,
cea5f9a2 122 target_ulong pc,
8a40a180 123 target_ulong cs_base,
c068688b 124 uint64_t flags)
8a40a180
FB
125{
126 TranslationBlock *tb, **ptb1;
8a40a180 127 unsigned int h;
337fc758 128 tb_page_addr_t phys_pc, phys_page1;
41c1b1c9 129 target_ulong virt_page2;
3b46e624 130
5e5f07e0 131 tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
3b46e624 132
8a40a180 133 /* find translated block using physical mappings */
41c1b1c9 134 phys_pc = get_page_addr_code(env, pc);
8a40a180 135 phys_page1 = phys_pc & TARGET_PAGE_MASK;
8a40a180 136 h = tb_phys_hash_func(phys_pc);
5e5f07e0 137 ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
8a40a180
FB
138 for(;;) {
139 tb = *ptb1;
140 if (!tb)
141 goto not_found;
5fafdf24 142 if (tb->pc == pc &&
8a40a180 143 tb->page_addr[0] == phys_page1 &&
5fafdf24 144 tb->cs_base == cs_base &&
8a40a180
FB
145 tb->flags == flags) {
146 /* check next page if needed */
147 if (tb->page_addr[1] != -1) {
337fc758
BS
148 tb_page_addr_t phys_page2;
149
5fafdf24 150 virt_page2 = (pc & TARGET_PAGE_MASK) +
8a40a180 151 TARGET_PAGE_SIZE;
41c1b1c9 152 phys_page2 = get_page_addr_code(env, virt_page2);
8a40a180
FB
153 if (tb->page_addr[1] == phys_page2)
154 goto found;
155 } else {
156 goto found;
157 }
158 }
159 ptb1 = &tb->phys_hash_next;
160 }
161 not_found:
2e70f6ef
PB
162 /* if no translated code available, then translate it now */
163 tb = tb_gen_code(env, pc, cs_base, flags, 0);
3b46e624 164
8a40a180 165 found:
2c90fe2b
KB
166 /* Move the last found TB to the head of the list */
167 if (likely(*ptb1)) {
168 *ptb1 = tb->phys_hash_next;
5e5f07e0
EV
169 tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
170 tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
2c90fe2b 171 }
8a40a180
FB
172 /* we add the TB in the virtual pc hash table */
173 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
8a40a180
FB
174 return tb;
175}
176
9349b4f9 177static inline TranslationBlock *tb_find_fast(CPUArchState *env)
8a40a180
FB
178{
179 TranslationBlock *tb;
180 target_ulong cs_base, pc;
6b917547 181 int flags;
8a40a180
FB
182
183 /* we record a subset of the CPU state. It will
184 always be the same before a given translated block
185 is executed. */
6b917547 186 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
bce61846 187 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
188 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
189 tb->flags != flags)) {
cea5f9a2 190 tb = tb_find_slow(env, pc, cs_base, flags);
8a40a180
FB
191 }
192 return tb;
193}
194
1009d2ed
JK
195static CPUDebugExcpHandler *debug_excp_handler;
196
84e3b602 197void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
1009d2ed 198{
1009d2ed 199 debug_excp_handler = handler;
1009d2ed
JK
200}
201
9349b4f9 202static void cpu_handle_debug_exception(CPUArchState *env)
1009d2ed
JK
203{
204 CPUWatchpoint *wp;
205
206 if (!env->watchpoint_hit) {
207 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
208 wp->flags &= ~BP_WATCHPOINT_HIT;
209 }
210 }
211 if (debug_excp_handler) {
212 debug_excp_handler(env);
213 }
214}
215
7d13299d
FB
216/* main execution loop */
217
1a28cac3
MT
218volatile sig_atomic_t exit_request;
219
9349b4f9 220int cpu_exec(CPUArchState *env)
7d13299d 221{
c356a1bc 222 CPUState *cpu = ENV_GET_CPU(env);
97a8ea5a
AF
223#if !(defined(CONFIG_USER_ONLY) && \
224 (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
225 CPUClass *cc = CPU_GET_CLASS(cpu);
693fa551
AF
226#endif
227#ifdef TARGET_I386
228 X86CPU *x86_cpu = X86_CPU(cpu);
97a8ea5a 229#endif
8a40a180 230 int ret, interrupt_request;
8a40a180 231 TranslationBlock *tb;
c27004ec 232 uint8_t *tc_ptr;
3e9bd63a 233 uintptr_t next_tb;
8c6939c0 234
259186a7 235 if (cpu->halted) {
3993c6bd 236 if (!cpu_has_work(cpu)) {
eda48c34
PB
237 return EXCP_HALTED;
238 }
239
259186a7 240 cpu->halted = 0;
eda48c34 241 }
5a1e3cfc 242
4917cf44 243 current_cpu = cpu;
e4533c7a 244
4917cf44 245 /* As long as current_cpu is null, up to the assignment just above,
ec9bd89f
OH
246 * requests by other threads to exit the execution loop are expected to
247 * be issued using the exit_request global. We must make sure that our
4917cf44 248 * evaluation of the global value is performed past the current_cpu
ec9bd89f
OH
249 * value transition point, which requires a memory barrier as well as
250 * an instruction scheduling constraint on modern architectures. */
251 smp_mb();
252
c629a4bc 253 if (unlikely(exit_request)) {
fcd7d003 254 cpu->exit_request = 1;
1a28cac3
MT
255 }
256
ecb644f4 257#if defined(TARGET_I386)
6792a57b
JK
258 /* put eflags in CPU temporary format */
259 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
80cf2c81 260 env->df = 1 - (2 * ((env->eflags >> 10) & 1));
6792a57b
JK
261 CC_OP = CC_OP_EFLAGS;
262 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
93ac68bc 263#elif defined(TARGET_SPARC)
e6e5906b
PB
264#elif defined(TARGET_M68K)
265 env->cc_op = CC_OP_FLAGS;
266 env->cc_dest = env->sr & 0xf;
267 env->cc_x = (env->sr >> 4) & 1;
ecb644f4
TS
268#elif defined(TARGET_ALPHA)
269#elif defined(TARGET_ARM)
d2fbca94 270#elif defined(TARGET_UNICORE32)
ecb644f4 271#elif defined(TARGET_PPC)
4e85f82c 272 env->reserve_addr = -1;
81ea0e13 273#elif defined(TARGET_LM32)
b779e29e 274#elif defined(TARGET_MICROBLAZE)
6af0bf9c 275#elif defined(TARGET_MIPS)
d15a9c23 276#elif defined(TARGET_MOXIE)
e67db06e 277#elif defined(TARGET_OPENRISC)
fdf9b3e8 278#elif defined(TARGET_SH4)
f1ccf904 279#elif defined(TARGET_CRIS)
10ec5117 280#elif defined(TARGET_S390X)
2328826b 281#elif defined(TARGET_XTENSA)
fdf9b3e8 282 /* XXXXX */
e4533c7a
FB
283#else
284#error unsupported target CPU
285#endif
3fb2ded1 286 env->exception_index = -1;
9d27abd9 287
7d13299d 288 /* prepare setjmp context for exception handling */
3fb2ded1 289 for(;;) {
6ab7e546 290 if (sigsetjmp(env->jmp_env, 0) == 0) {
3fb2ded1
FB
291 /* if an exception is pending, we execute it here */
292 if (env->exception_index >= 0) {
293 if (env->exception_index >= EXCP_INTERRUPT) {
294 /* exit request from the cpu execution loop */
295 ret = env->exception_index;
1009d2ed
JK
296 if (ret == EXCP_DEBUG) {
297 cpu_handle_debug_exception(env);
298 }
3fb2ded1 299 break;
72d239ed
AJ
300 } else {
301#if defined(CONFIG_USER_ONLY)
3fb2ded1 302 /* if user mode only, we simulate a fake exception
9f083493 303 which will be handled outside the cpu execution
3fb2ded1 304 loop */
83479e77 305#if defined(TARGET_I386)
97a8ea5a 306 cc->do_interrupt(cpu);
83479e77 307#endif
3fb2ded1
FB
308 ret = env->exception_index;
309 break;
72d239ed 310#else
97a8ea5a 311 cc->do_interrupt(cpu);
301d2908 312 env->exception_index = -1;
83479e77 313#endif
3fb2ded1 314 }
5fafdf24 315 }
9df217a3 316
b5fc09ae 317 next_tb = 0; /* force lookup of first TB */
3fb2ded1 318 for(;;) {
259186a7 319 interrupt_request = cpu->interrupt_request;
e1638bd8 320 if (unlikely(interrupt_request)) {
ed2803da 321 if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
e1638bd8 322 /* Mask out external interrupts for this step. */
3125f763 323 interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
e1638bd8 324 }
6658ffb8 325 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
259186a7 326 cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
6658ffb8 327 env->exception_index = EXCP_DEBUG;
1162c041 328 cpu_loop_exit(env);
6658ffb8 329 }
a90b7318 330#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
b779e29e 331 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
d2fbca94 332 defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
a90b7318 333 if (interrupt_request & CPU_INTERRUPT_HALT) {
259186a7
AF
334 cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
335 cpu->halted = 1;
a90b7318 336 env->exception_index = EXCP_HLT;
1162c041 337 cpu_loop_exit(env);
a90b7318
AZ
338 }
339#endif
68a79315 340#if defined(TARGET_I386)
5d62c43a
JK
341#if !defined(CONFIG_USER_ONLY)
342 if (interrupt_request & CPU_INTERRUPT_POLL) {
259186a7 343 cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
693fa551 344 apic_poll_irq(x86_cpu->apic_state);
5d62c43a
JK
345 }
346#endif
b09ea7d5 347 if (interrupt_request & CPU_INTERRUPT_INIT) {
77b2bc2c
BS
348 cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
349 0);
693fa551 350 do_cpu_init(x86_cpu);
b09ea7d5 351 env->exception_index = EXCP_HALTED;
1162c041 352 cpu_loop_exit(env);
b09ea7d5 353 } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
693fa551 354 do_cpu_sipi(x86_cpu);
b09ea7d5 355 } else if (env->hflags2 & HF2_GIF_MASK) {
db620f46
FB
356 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
357 !(env->hflags & HF_SMM_MASK)) {
77b2bc2c
BS
358 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
359 0);
259186a7 360 cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
693fa551 361 do_smm_enter(x86_cpu);
db620f46
FB
362 next_tb = 0;
363 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
364 !(env->hflags2 & HF2_NMI_MASK)) {
259186a7 365 cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
db620f46 366 env->hflags2 |= HF2_NMI_MASK;
e694d4e2 367 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
db620f46 368 next_tb = 0;
e965fc38 369 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
259186a7 370 cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
e694d4e2 371 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
79c4f6b0 372 next_tb = 0;
db620f46
FB
373 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
374 (((env->hflags2 & HF2_VINTR_MASK) &&
375 (env->hflags2 & HF2_HIF_MASK)) ||
376 (!(env->hflags2 & HF2_VINTR_MASK) &&
377 (env->eflags & IF_MASK &&
378 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
379 int intno;
77b2bc2c
BS
380 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
381 0);
259186a7
AF
382 cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
383 CPU_INTERRUPT_VIRQ);
db620f46 384 intno = cpu_get_pic_interrupt(env);
4f213879 385 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
386 do_interrupt_x86_hardirq(env, intno, 1);
387 /* ensure that no TB jump will be modified as
388 the program flow was changed */
389 next_tb = 0;
0573fbfc 390#if !defined(CONFIG_USER_ONLY)
db620f46
FB
391 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
392 (env->eflags & IF_MASK) &&
393 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
394 int intno;
395 /* FIXME: this should respect TPR */
77b2bc2c
BS
396 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
397 0);
fdfba1a2
EI
398 intno = ldl_phys(cpu->as,
399 env->vm_vmcb
400 + offsetof(struct vmcb,
401 control.int_vector));
93fcfe39 402 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
e694d4e2 403 do_interrupt_x86_hardirq(env, intno, 1);
259186a7 404 cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
db620f46 405 next_tb = 0;
907a5b26 406#endif
db620f46 407 }
68a79315 408 }
ce09776b 409#elif defined(TARGET_PPC)
9fddaa0c 410 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
c356a1bc 411 cpu_reset(cpu);
9fddaa0c 412 }
47103572 413 if (interrupt_request & CPU_INTERRUPT_HARD) {
e9df014c 414 ppc_hw_interrupt(env);
259186a7
AF
415 if (env->pending_interrupts == 0) {
416 cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
417 }
b5fc09ae 418 next_tb = 0;
ce09776b 419 }
81ea0e13
MW
420#elif defined(TARGET_LM32)
421 if ((interrupt_request & CPU_INTERRUPT_HARD)
422 && (env->ie & IE_IE)) {
423 env->exception_index = EXCP_IRQ;
97a8ea5a 424 cc->do_interrupt(cpu);
81ea0e13
MW
425 next_tb = 0;
426 }
b779e29e
EI
427#elif defined(TARGET_MICROBLAZE)
428 if ((interrupt_request & CPU_INTERRUPT_HARD)
429 && (env->sregs[SR_MSR] & MSR_IE)
430 && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
431 && !(env->iflags & (D_FLAG | IMM_FLAG))) {
432 env->exception_index = EXCP_IRQ;
97a8ea5a 433 cc->do_interrupt(cpu);
b779e29e
EI
434 next_tb = 0;
435 }
6af0bf9c
FB
436#elif defined(TARGET_MIPS)
437 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
4cdc1cd1 438 cpu_mips_hw_interrupts_pending(env)) {
6af0bf9c
FB
439 /* Raise it */
440 env->exception_index = EXCP_EXT_INTERRUPT;
441 env->error_code = 0;
97a8ea5a 442 cc->do_interrupt(cpu);
b5fc09ae 443 next_tb = 0;
6af0bf9c 444 }
b6a71ef7
JL
445#elif defined(TARGET_OPENRISC)
446 {
447 int idx = -1;
448 if ((interrupt_request & CPU_INTERRUPT_HARD)
449 && (env->sr & SR_IEE)) {
450 idx = EXCP_INT;
451 }
452 if ((interrupt_request & CPU_INTERRUPT_TIMER)
453 && (env->sr & SR_TEE)) {
454 idx = EXCP_TICK;
455 }
456 if (idx >= 0) {
457 env->exception_index = idx;
97a8ea5a 458 cc->do_interrupt(cpu);
b6a71ef7
JL
459 next_tb = 0;
460 }
461 }
e95c8d51 462#elif defined(TARGET_SPARC)
d532b26c
IK
463 if (interrupt_request & CPU_INTERRUPT_HARD) {
464 if (cpu_interrupts_enabled(env) &&
465 env->interrupt_index > 0) {
466 int pil = env->interrupt_index & 0xf;
467 int type = env->interrupt_index & 0xf0;
468
469 if (((type == TT_EXTINT) &&
470 cpu_pil_allowed(env, pil)) ||
471 type != TT_EXTINT) {
472 env->exception_index = env->interrupt_index;
97a8ea5a 473 cc->do_interrupt(cpu);
d532b26c
IK
474 next_tb = 0;
475 }
476 }
e965fc38 477 }
b5ff1b31
FB
478#elif defined(TARGET_ARM)
479 if (interrupt_request & CPU_INTERRUPT_FIQ
480 && !(env->uncached_cpsr & CPSR_F)) {
481 env->exception_index = EXCP_FIQ;
97a8ea5a 482 cc->do_interrupt(cpu);
b5fc09ae 483 next_tb = 0;
b5ff1b31 484 }
9ee6e8bb
PB
485 /* ARMv7-M interrupt return works by loading a magic value
486 into the PC. On real hardware the load causes the
487 return to occur. The qemu implementation performs the
488 jump normally, then does the exception return when the
489 CPU tries to execute code at the magic address.
490 This will cause the magic PC value to be pushed to
a1c7273b 491 the stack if an interrupt occurred at the wrong time.
9ee6e8bb
PB
492 We avoid this by disabling interrupts when
493 pc contains a magic address. */
b5ff1b31 494 if (interrupt_request & CPU_INTERRUPT_HARD
9ee6e8bb
PB
495 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
496 || !(env->uncached_cpsr & CPSR_I))) {
b5ff1b31 497 env->exception_index = EXCP_IRQ;
97a8ea5a 498 cc->do_interrupt(cpu);
b5fc09ae 499 next_tb = 0;
b5ff1b31 500 }
d2fbca94
GX
501#elif defined(TARGET_UNICORE32)
502 if (interrupt_request & CPU_INTERRUPT_HARD
503 && !(env->uncached_asr & ASR_I)) {
d48813dd 504 env->exception_index = UC32_EXCP_INTR;
97a8ea5a 505 cc->do_interrupt(cpu);
d2fbca94
GX
506 next_tb = 0;
507 }
fdf9b3e8 508#elif defined(TARGET_SH4)
e96e2044 509 if (interrupt_request & CPU_INTERRUPT_HARD) {
97a8ea5a 510 cc->do_interrupt(cpu);
b5fc09ae 511 next_tb = 0;
e96e2044 512 }
eddf68a6 513#elif defined(TARGET_ALPHA)
6a80e088
RH
514 {
515 int idx = -1;
516 /* ??? This hard-codes the OSF/1 interrupt levels. */
e965fc38 517 switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
6a80e088
RH
518 case 0 ... 3:
519 if (interrupt_request & CPU_INTERRUPT_HARD) {
520 idx = EXCP_DEV_INTERRUPT;
521 }
522 /* FALLTHRU */
523 case 4:
524 if (interrupt_request & CPU_INTERRUPT_TIMER) {
525 idx = EXCP_CLK_INTERRUPT;
526 }
527 /* FALLTHRU */
528 case 5:
529 if (interrupt_request & CPU_INTERRUPT_SMP) {
530 idx = EXCP_SMP_INTERRUPT;
531 }
532 /* FALLTHRU */
533 case 6:
534 if (interrupt_request & CPU_INTERRUPT_MCHK) {
535 idx = EXCP_MCHK;
536 }
537 }
538 if (idx >= 0) {
539 env->exception_index = idx;
540 env->error_code = 0;
97a8ea5a 541 cc->do_interrupt(cpu);
6a80e088
RH
542 next_tb = 0;
543 }
eddf68a6 544 }
f1ccf904 545#elif defined(TARGET_CRIS)
1b1a38b0 546 if (interrupt_request & CPU_INTERRUPT_HARD
fb9fb692
EI
547 && (env->pregs[PR_CCS] & I_FLAG)
548 && !env->locked_irq) {
1b1a38b0 549 env->exception_index = EXCP_IRQ;
97a8ea5a 550 cc->do_interrupt(cpu);
1b1a38b0
EI
551 next_tb = 0;
552 }
8219314b
LP
553 if (interrupt_request & CPU_INTERRUPT_NMI) {
554 unsigned int m_flag_archval;
555 if (env->pregs[PR_VR] < 32) {
556 m_flag_archval = M_FLAG_V10;
557 } else {
558 m_flag_archval = M_FLAG_V32;
559 }
560 if ((env->pregs[PR_CCS] & m_flag_archval)) {
561 env->exception_index = EXCP_NMI;
97a8ea5a 562 cc->do_interrupt(cpu);
8219314b
LP
563 next_tb = 0;
564 }
f1ccf904 565 }
0633879f
PB
566#elif defined(TARGET_M68K)
567 if (interrupt_request & CPU_INTERRUPT_HARD
568 && ((env->sr & SR_I) >> SR_I_SHIFT)
569 < env->pending_level) {
570 /* Real hardware gets the interrupt vector via an
571 IACK cycle at this point. Current emulated
572 hardware doesn't rely on this, so we
573 provide/save the vector when the interrupt is
574 first signalled. */
575 env->exception_index = env->pending_vector;
3c688828 576 do_interrupt_m68k_hardirq(env);
b5fc09ae 577 next_tb = 0;
0633879f 578 }
3110e292
AG
579#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
580 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
581 (env->psw.mask & PSW_MASK_EXT)) {
97a8ea5a 582 cc->do_interrupt(cpu);
3110e292
AG
583 next_tb = 0;
584 }
40643d7c
MF
585#elif defined(TARGET_XTENSA)
586 if (interrupt_request & CPU_INTERRUPT_HARD) {
587 env->exception_index = EXC_IRQ;
97a8ea5a 588 cc->do_interrupt(cpu);
40643d7c
MF
589 next_tb = 0;
590 }
68a79315 591#endif
ff2712ba 592 /* Don't use the cached interrupt_request value,
9d05095e 593 do_interrupt may have updated the EXITTB flag. */
259186a7
AF
594 if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
595 cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
bf3e8bf1
FB
596 /* ensure that no TB jump will be modified as
597 the program flow was changed */
b5fc09ae 598 next_tb = 0;
bf3e8bf1 599 }
be214e6c 600 }
fcd7d003
AF
601 if (unlikely(cpu->exit_request)) {
602 cpu->exit_request = 0;
be214e6c 603 env->exception_index = EXCP_INTERRUPT;
1162c041 604 cpu_loop_exit(env);
3fb2ded1 605 }
5e5f07e0 606 spin_lock(&tcg_ctx.tb_ctx.tb_lock);
cea5f9a2 607 tb = tb_find_fast(env);
d5975363
PB
608 /* Note: we do it here to avoid a gcc bug on Mac OS X when
609 doing it in tb_find_slow */
5e5f07e0 610 if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
d5975363
PB
611 /* as some TB could have been invalidated because
612 of memory exceptions while generating the code, we
613 must recompute the hash index here */
614 next_tb = 0;
5e5f07e0 615 tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
d5975363 616 }
c30d1aea
PM
617 if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
618 qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
619 tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
620 }
8a40a180
FB
621 /* see if we can patch the calling TB. When the TB
622 spans two pages, we cannot safely do a direct
623 jump. */
040f2fb2 624 if (next_tb != 0 && tb->page_addr[1] == -1) {
0980011b
PM
625 tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
626 next_tb & TB_EXIT_MASK, tb);
3fb2ded1 627 }
5e5f07e0 628 spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
55e8b85e 629
630 /* cpu_interrupt might be called while translating the
631 TB, but before it is linked into a potentially
632 infinite loop and becomes env->current_tb. Avoid
633 starting execution if there is a pending interrupt. */
d77953b9 634 cpu->current_tb = tb;
b0052d15 635 barrier();
fcd7d003 636 if (likely(!cpu->exit_request)) {
2e70f6ef 637 tc_ptr = tb->tc_ptr;
e965fc38 638 /* execute the generated code */
77211379 639 next_tb = cpu_tb_exec(cpu, tc_ptr);
378df4b2
PM
640 switch (next_tb & TB_EXIT_MASK) {
641 case TB_EXIT_REQUESTED:
642 /* Something asked us to stop executing
643 * chained TBs; just continue round the main
644 * loop. Whatever requested the exit will also
645 * have set something else (eg exit_request or
646 * interrupt_request) which we will handle
647 * next time around the loop.
648 */
649 tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
650 next_tb = 0;
651 break;
652 case TB_EXIT_ICOUNT_EXPIRED:
653 {
bf20dc07 654 /* Instruction counter expired. */
2e70f6ef 655 int insns_left;
0980011b 656 tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
2e70f6ef
PB
657 insns_left = env->icount_decr.u32;
658 if (env->icount_extra && insns_left >= 0) {
659 /* Refill decrementer and continue execution. */
660 env->icount_extra += insns_left;
661 if (env->icount_extra > 0xffff) {
662 insns_left = 0xffff;
663 } else {
664 insns_left = env->icount_extra;
665 }
666 env->icount_extra -= insns_left;
667 env->icount_decr.u16.low = insns_left;
668 } else {
669 if (insns_left > 0) {
670 /* Execute remaining instructions. */
cea5f9a2 671 cpu_exec_nocache(env, insns_left, tb);
2e70f6ef
PB
672 }
673 env->exception_index = EXCP_INTERRUPT;
674 next_tb = 0;
1162c041 675 cpu_loop_exit(env);
2e70f6ef 676 }
378df4b2
PM
677 break;
678 }
679 default:
680 break;
2e70f6ef
PB
681 }
682 }
d77953b9 683 cpu->current_tb = NULL;
4cbf74b6
FB
684 /* reset soft MMU for next block (it can currently
685 only be set by a memory fault) */
50a518e3 686 } /* for(;;) */
0d101938
JK
687 } else {
688 /* Reload env after longjmp - the compiler may have smashed all
689 * local variables as longjmp is marked 'noreturn'. */
4917cf44
AF
690 cpu = current_cpu;
691 env = cpu->env_ptr;
6c78f29a
JL
692#if !(defined(CONFIG_USER_ONLY) && \
693 (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
694 cc = CPU_GET_CLASS(cpu);
693fa551
AF
695#endif
696#ifdef TARGET_I386
697 x86_cpu = X86_CPU(cpu);
6c78f29a 698#endif
7d13299d 699 }
3fb2ded1
FB
700 } /* for(;;) */
701
7d13299d 702
e4533c7a 703#if defined(TARGET_I386)
9de5e440 704 /* restore flags in standard format */
e694d4e2 705 env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
80cf2c81 706 | (env->df & DF_MASK);
e4533c7a 707#elif defined(TARGET_ARM)
b7bcbe95 708 /* XXX: Save/restore host fpu exception state?. */
d2fbca94 709#elif defined(TARGET_UNICORE32)
93ac68bc 710#elif defined(TARGET_SPARC)
67867308 711#elif defined(TARGET_PPC)
81ea0e13 712#elif defined(TARGET_LM32)
e6e5906b
PB
713#elif defined(TARGET_M68K)
714 cpu_m68k_flush_flags(env, env->cc_op);
715 env->cc_op = CC_OP_FLAGS;
716 env->sr = (env->sr & 0xffe0)
717 | env->cc_dest | (env->cc_x << 4);
b779e29e 718#elif defined(TARGET_MICROBLAZE)
6af0bf9c 719#elif defined(TARGET_MIPS)
d15a9c23 720#elif defined(TARGET_MOXIE)
e67db06e 721#elif defined(TARGET_OPENRISC)
fdf9b3e8 722#elif defined(TARGET_SH4)
eddf68a6 723#elif defined(TARGET_ALPHA)
f1ccf904 724#elif defined(TARGET_CRIS)
10ec5117 725#elif defined(TARGET_S390X)
2328826b 726#elif defined(TARGET_XTENSA)
fdf9b3e8 727 /* XXXXX */
e4533c7a
FB
728#else
729#error unsupported target CPU
730#endif
1057eaa7 731
4917cf44
AF
732 /* fail safe : never use current_cpu outside cpu_exec() */
733 current_cpu = NULL;
7d13299d
FB
734 return ret;
735}