/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    CPUState *cpu = ENV_GET_CPU(env);

    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

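/* The value returned by tcg_qemu_tb_exec() (and hence by cpu_tb_exec()
 * below) packs two things into one word: the low TB_EXIT_MASK bits hold
 * the exit reason (TB_EXIT_IDX0/IDX1 for a normal exit through one of the
 * goto_tb slots, TB_EXIT_ICOUNT_EXPIRED or TB_EXIT_REQUESTED for an early
 * exit), and the remaining bits are the pointer to the TranslationBlock
 * that was executing (or about to execute).  The main loop uses the
 * pointer part for direct-jump chaining and the low bits to decide what
 * to do next; see the TB_EXIT_* definitions in tcg.h.
 */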
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because there is
         * a pending interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

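/* TB lookup is two-level: tb_find_fast() first checks the per-CPU
 * tb_jmp_cache, a small direct-mapped cache indexed by the guest virtual
 * PC.  On a miss it falls back to tb_find_slow(), which hashes the guest
 * physical PC into tcg_ctx.tb_ctx.tb_phys_hash and walks that chain
 * (translating a new block if nothing matches), then refills tb_jmp_cache.
 */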
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

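/* cpu_handle_debug_exception() runs when the guest stops with EXCP_DEBUG:
 * if no watchpoint is actually being reported, it clears any stale
 * BP_WATCHPOINT_HIT flags, then invokes the optional handler installed
 * with cpu_set_debug_excp_handler().  A (hypothetical) client would do:
 *
 *     static void my_debug_handler(CPUArchState *env) { ... }
 *     cpu_set_debug_excp_handler(my_debug_handler);
 */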
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

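/* Global "please leave cpu_exec()" flag.  It is set outside the execution
 * loop (e.g. from a signal handler, hence sig_atomic_t) and is sampled
 * once below, where it is latched into the per-CPU cpu->exit_request.
 */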
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
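            /* next_tb carries the pointer and exit status of the most
             * recently executed TB (as returned by cpu_tb_exec()); it is
             * used below to patch a direct jump from that TB to its
             * successor, and is cleared (as above) whenever chaining would
             * be unsafe, e.g. after an interrupt or exception was handled.
             */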
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
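                    /* What follows is per-target interrupt delivery: each
                     * target checks its own interrupt-enable state and, if
                     * the request can be taken, either raises an exception
                     * index and calls cc->do_interrupt() or injects the
                     * interrupt directly.  Every path that delivers an
                     * interrupt clears next_tb so the previous TB is not
                     * chained across the change in control flow.
                     */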
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
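                /* tb_lock protects the translation buffer, the physical
                 * hash table and direct-jump chaining, so hold it across
                 * the lookup/translation below and the tb_add_jump()
                 * patching of the previous TB.
                 */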
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes cpu->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
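                        /* The low 16 bits of icount_decr are the instruction
                         * budget the translated code decrements as it runs;
                         * icount_extra holds the rest of the allotted
                         * instructions.  Either top the low half up from
                         * icount_extra and keep executing, or run the last
                         * few instructions via cpu_exec_nocache() and leave
                         * the loop with EXCP_INTERRUPT.
                         */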
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}