/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    CPUState *cpu = ENV_GET_CPU(env);

    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
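    /* The two low bits of next_tb encode why we came back: which goto_tb
     * jump slot was taken (TB_EXIT_IDX0/IDX1), or TB_EXIT_REQUESTED /
     * TB_EXIT_ICOUNT_EXPIRED.  The remaining bits are the address of the
     * TranslationBlock that was executing when we left the code cache,
     * which is why the checks below mask with ~TB_EXIT_MASK.
     */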
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
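
/* cpu_exec_nocache() is only called from the icount-expiry handling at the
 * bottom of cpu_exec(), when fewer instructions remain in the budget than
 * the cached TB would execute; the temporary TB is discarded again as soon
 * as it has run.
 */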

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
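
/* Lookup strategy: translated blocks live in a hash table keyed by the
 * physical address of the guest code; a hit is moved to the front of its
 * chain, and the winner is also stored in the per-CPU tb_jmp_cache so that
 * tb_find_fast() can usually avoid calling this function at all.
 */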

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
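
/* A TB is only reusable for the exact (pc, cs_base, flags) triple it was
 * translated for, because the code generator specialises on that state;
 * hence the full comparison above before a jump-cache entry is trusted.
 */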

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
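    /* cpu_loop_exit() siglongjmps back to the sigsetjmp() below with a
     * non-zero value, which lands us in the "else" branch further down,
     * where the local variables are reloaded.
     */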
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
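                /* tb_add_jump() patches the goto_tb slot of the previous TB
                 * (slot index = next_tb & TB_EXIT_MASK) so that subsequent
                 * runs branch straight from one TB into the next without
                 * coming back through this lookup loop.
                 */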
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
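                        /* icount bookkeeping (roughly): code generated in
                         * icount mode charges each TB against the 16-bit
                         * budget in cpu->icount_decr.u16.low and takes this
                         * exit when too little budget remains; icount_extra
                         * holds whatever part of the allotted count did not
                         * fit in those 16 bits.  Either top the budget up
                         * again, or run the last few instructions via
                         * cpu_exec_nocache() and leave the loop.
                         */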
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}