]> git.proxmox.com Git - mirror_qemu.git/blame - cpu-exec.c
apic: do not accept SIPI on the bootstrap processor
[mirror_qemu.git] / cpu-exec.c
CommitLineData
7d13299d 1/*
e965fc38 2 * emulator main execution loop
5fafdf24 3 *
66321a11 4 * Copyright (c) 2003-2005 Fabrice Bellard
7d13299d 5 *
3ef693a0
FB
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
7d13299d 10 *
3ef693a0
FB
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
7d13299d 15 *
3ef693a0 16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
7d13299d 18 */
e4533c7a 19#include "config.h"
cea5f9a2 20#include "cpu.h"
76cad711 21#include "disas/disas.h"
7cb69cae 22#include "tcg.h"
1de7afc9 23#include "qemu/atomic.h"
9c17d615 24#include "sysemu/qtest.h"
7d13299d 25
/* Leave the currently-executing translation block and return to the
 * main execution loop, via the sigsetjmp() point established in
 * cpu_exec().  This function does not return to its caller. */
void cpu_loop_exit(CPUState *cpu)
{
    /* No TB is executing once we are back in the main loop, so clear
     * the pointer before the non-local jump. */
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}
bfed01fc 31
fbf9eeb3
FB
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
/* Re-enter the cpu_exec() main loop after a fault was handled in a host
 * signal handler.  Clearing exception_index means no guest exception is
 * reported; execution simply resumes from the top of the loop.
 * NOTE(review): 'puc' (presumably the signal ucontext) is unused in the
 * softmmu build — kept for interface compatibility; confirm against the
 * user-mode variant.  Does not return. */
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif
fbf9eeb3 44
77211379
PM
45/* Execute a TB, and fix up the CPU state afterwards if necessary */
46static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
47{
48 CPUArchState *env = cpu->env_ptr;
03afa5f8
RH
49 uintptr_t next_tb;
50
51#if defined(DEBUG_DISAS)
52 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
53#if defined(TARGET_I386)
54 log_cpu_state(cpu, CPU_DUMP_CCOP);
55#elif defined(TARGET_M68K)
56 /* ??? Should not modify env state for dumping. */
57 cpu_m68k_flush_flags(env, env->cc_op);
58 env->cc_op = CC_OP_FLAGS;
59 env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
60 log_cpu_state(cpu, 0);
61#else
62 log_cpu_state(cpu, 0);
63#endif
64 }
65#endif /* DEBUG_DISAS */
66
67 next_tb = tcg_qemu_tb_exec(env, tb_ptr);
77211379
PM
68 if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
69 /* We didn't start executing this TB (eg because the instruction
70 * counter hit zero); we must restore the guest PC to the address
71 * of the start of the TB.
72 */
bdf7ae5b 73 CPUClass *cc = CPU_GET_CLASS(cpu);
77211379 74 TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
bdf7ae5b
AF
75 if (cc->synchronize_from_tb) {
76 cc->synchronize_from_tb(cpu, tb);
77 } else {
78 assert(cc->set_pc);
79 cc->set_pc(cpu, tb->pc);
80 }
77211379 81 }
378df4b2
PM
82 if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
83 /* We were asked to stop executing TBs (probably a pending
84 * interrupt. We've now stopped, so clear the flag.
85 */
86 cpu->tcg_exit_req = 0;
87 }
77211379
PM
88 return next_tb;
89}
90
2e70f6ef
PB
91/* Execute the code without caching the generated code. An interpreter
92 could be used if available. */
9349b4f9 93static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
cea5f9a2 94 TranslationBlock *orig_tb)
2e70f6ef 95{
d77953b9 96 CPUState *cpu = ENV_GET_CPU(env);
2e70f6ef
PB
97 TranslationBlock *tb;
98
99 /* Should never happen.
100 We only end up here when an existing TB is too long. */
101 if (max_cycles > CF_COUNT_MASK)
102 max_cycles = CF_COUNT_MASK;
103
648f034c 104 tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
2e70f6ef 105 max_cycles);
d77953b9 106 cpu->current_tb = tb;
2e70f6ef 107 /* execute the generated code */
77211379 108 cpu_tb_exec(cpu, tb->tc_ptr);
d77953b9 109 cpu->current_tb = NULL;
2e70f6ef
PB
110 tb_phys_invalidate(tb, -1);
111 tb_free(tb);
112}
113
/* Slow-path TB lookup: search the physical-address hash table for a
 * translation matching (pc, cs_base, flags), generating a new one if
 * none exists.  On success the TB is moved to the head of its hash
 * chain (MRU) and installed in the per-CPU virtual-pc jump cache.
 * Called from tb_find_fast() on a jump-cache miss. */
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    /* ptb1 always points at the link slot that holds the node under
     * inspection; at 'found' it therefore addresses the slot pointing
     * to the matched TB, which the MRU move below relies on. */
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
170
9349b4f9 171static inline TranslationBlock *tb_find_fast(CPUArchState *env)
8a40a180 172{
8cd70437 173 CPUState *cpu = ENV_GET_CPU(env);
8a40a180
FB
174 TranslationBlock *tb;
175 target_ulong cs_base, pc;
6b917547 176 int flags;
8a40a180
FB
177
178 /* we record a subset of the CPU state. It will
179 always be the same before a given translated block
180 is executed. */
6b917547 181 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
8cd70437 182 tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
551bd27f
TS
183 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
184 tb->flags != flags)) {
cea5f9a2 185 tb = tb_find_slow(env, pc, cs_base, flags);
8a40a180
FB
186 }
187 return tb;
188}
189
/* Callback invoked from cpu_handle_debug_exception(); NULL when unset. */
static CPUDebugExcpHandler *debug_excp_handler;

/* Register (or clear, by passing NULL) the debug-exception callback.
 * NOTE(review): the previous handler is not returned and there is no
 * locking — presumably registration happens once at setup time; verify
 * if this is ever called from multiple threads. */
void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}
196
9349b4f9 197static void cpu_handle_debug_exception(CPUArchState *env)
1009d2ed 198{
ff4700b0 199 CPUState *cpu = ENV_GET_CPU(env);
1009d2ed
JK
200 CPUWatchpoint *wp;
201
ff4700b0
AF
202 if (!cpu->watchpoint_hit) {
203 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
1009d2ed
JK
204 wp->flags &= ~BP_WATCHPOINT_HIT;
205 }
206 }
207 if (debug_excp_handler) {
208 debug_excp_handler(env);
209 }
210}
211
/* main execution loop */

/* Global cross-thread request to leave the execution loop; must be
 * sig_atomic_t because it is written from signal context. */
volatile sig_atomic_t exit_request;

/* Main guest-execution loop: repeatedly find (or translate) the next
 * TB, chain it to its predecessor when safe, and run it, servicing
 * pending exceptions and hardware interrupts between TBs.
 *
 * Returns the reason execution stopped (an EXCP_* value such as
 * EXCP_INTERRUPT, EXCP_HLT, EXCP_DEBUG, or EXCP_HALTED).
 *
 * Exception control flow: cpu_loop_exit() siglongjmp()s back to the
 * sigsetjmp() below, so after a longjmp all non-volatile locals must be
 * reloaded (see the else-branch near the bottom of the outer loop). */
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    /* A halted CPU only resumes when it has work (pending interrupt). */
    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

    /* Per-target entry fixups: convert architectural state into the
     * internal form the translated code expects. */
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
                    /* Per-target hardware interrupt dispatch. */
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            /* If the longjmp happened while we held the TB lock, drop it
             * now so the next iteration can take it again. */
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */


    /* Per-target exit fixups: convert internal state back to the
     * standard architectural format. */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}