/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"

bool qemu_cpu_has_work(CPUState *cpu)
{
    return cpu_has_work(cpu);
}

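/* Unwind back to the sigsetjmp(env->jmp_env, ...) call in cpu_exec();
   used to leave generated code when an exception or exit request is
   pending. */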
void cpu_loop_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->current_tb = NULL;
    siglongjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUArchState *env, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    env->exception_index = -1;
    siglongjmp(env->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
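
/* Illustrative decode of cpu_tb_exec()'s return value (a sketch;
 * "last_tb" and "exit_reason" are hypothetical names):
 *
 *     TranslationBlock *last_tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
 *     unsigned exit_reason = next_tb & TB_EXIT_MASK;
 *
 * A TB pointer and a 2-bit exit reason share one word: exit_reason is
 * TB_EXIT_IDX0/TB_EXIT_IDX1 for an exit through a chained jump slot, or
 * TB_EXIT_REQUESTED/TB_EXIT_ICOUNT_EXPIRED when execution was cut short.
 * The packing works because TBs are aligned, so the low bits of their
 * host address are free.
 */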

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
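
/* cpu_exec_nocache() above is the icount slow path: when fewer
 * instructions remain in the budget than a cached TB would execute (see
 * the TB_EXIT_ICOUNT_EXPIRED case below), a throw-away TB capped at
 * max_cycles is generated, run once and freed, leaving the original
 * cached TB untouched.
 */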

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
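
/* Lookup is two-level: tb_find_fast() probes tb_jmp_cache, a small
 * direct-mapped cache indexed by a hash of the virtual PC; on a miss, or
 * when the cached entry's pc/cs_base/flags no longer match the CPU
 * state, tb_find_slow() walks the physically-indexed hash chain and, as
 * a last resort, retranslates via tb_gen_code().
 */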
176
1009d2ed
JK
177static CPUDebugExcpHandler *debug_excp_handler;
178
84e3b602 179void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
1009d2ed 180{
1009d2ed 181 debug_excp_handler = handler;
1009d2ed
JK
182}
183
9349b4f9 184static void cpu_handle_debug_exception(CPUArchState *env)
1009d2ed
JK
185{
186 CPUWatchpoint *wp;
187
188 if (!env->watchpoint_hit) {
189 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
190 wp->flags &= ~BP_WATCHPOINT_HIT;
191 }
192 }
193 if (debug_excp_handler) {
194 debug_excp_handler(env);
195 }
196}
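
/* Minimal usage sketch (hypothetical handler name): a debugger front end
 * can hook debug exceptions before cpu_exec() returns EXCP_DEBUG:
 *
 *     static void my_debug_excp_handler(CPUArchState *env)
 *     {
 *         ... inspect env->watchpoint_hit, report to the user ...
 *     }
 *
 *     cpu_set_debug_excp_handler(my_debug_excp_handler);
 */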
197
7d13299d
FB
198/* main execution loop */
199
1a28cac3
MT
200volatile sig_atomic_t exit_request;
201
9349b4f9 202int cpu_exec(CPUArchState *env)
7d13299d 203{
c356a1bc 204 CPUState *cpu = ENV_GET_CPU(env);
97a8ea5a
AF
205#if !(defined(CONFIG_USER_ONLY) && \
206 (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
207 CPUClass *cc = CPU_GET_CLASS(cpu);
208#endif
8a40a180 209 int ret, interrupt_request;
8a40a180 210 TranslationBlock *tb;
c27004ec 211 uint8_t *tc_ptr;
3e9bd63a 212 uintptr_t next_tb;
8c6939c0 213
259186a7 214 if (cpu->halted) {
3993c6bd 215 if (!cpu_has_work(cpu)) {
eda48c34
PB
216 return EXCP_HALTED;
217 }
218
259186a7 219 cpu->halted = 0;
eda48c34 220 }
5a1e3cfc 221
4917cf44 222 current_cpu = cpu;
e4533c7a 223
4917cf44 224 /* As long as current_cpu is null, up to the assignment just above,
ec9bd89f
OH
225 * requests by other threads to exit the execution loop are expected to
226 * be issued using the exit_request global. We must make sure that our
4917cf44 227 * evaluation of the global value is performed past the current_cpu
ec9bd89f
OH
228 * value transition point, which requires a memory barrier as well as
229 * an instruction scheduling constraint on modern architectures. */
230 smp_mb();
231
c629a4bc 232 if (unlikely(exit_request)) {
fcd7d003 233 cpu->exit_request = 1;
1a28cac3
MT
234 }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(env->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = env->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    env->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(env);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit(env);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(env->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
                                                      0);
                        do_cpu_init(x86_env_get_cpu(env));
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit(env);
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_env_get_cpu(env));
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_env_get_cpu(env));
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(env->vm_vmcb +
                                             offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        env->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            env->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            env->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        env->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value;
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(env);
                }
#if defined(DEBUG_DISAS)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(cpu, 0);
#else
                    log_cpu_state(cpu, 0);
#endif
                }
#endif /* DEBUG_DISAS */
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
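                /* TB chaining: tb_add_jump() patches jump slot
                 * (next_tb & TB_EXIT_MASK) of the block we just left so
                 * that it branches directly into the new TB; later
                 * executions then flow block-to-block without returning
                 * to this lookup loop.
                 */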
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
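                    /* Worked example of the refill below: with
                     * icount_decr.u32 == 0 and icount_extra == 0x30000,
                     * the decrementer is reloaded with 0xffff instructions
                     * and icount_extra drops to 0x20001; only once the
                     * whole budget is gone does the else branch run
                     * cpu_exec_nocache() for the remainder and leave with
                     * EXCP_INTERRUPT.
                     */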
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(env);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}