/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
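
/* A quick worked example of the accounting above (an explanatory aside,
 * not part of the original file): suppose diff_clk has accumulated to
 * +5,000,000 ns, i.e. the guest clock is 5 ms ahead of the host. That
 * exceeds VM_CLOCK_ADVANCE (3,000,000 ns), so align_clocks() sleeps for
 * the full difference:
 *
 *     sleep_delay.tv_sec  = 5000000 / 1000000000LL;   -- 0 s
 *     sleep_delay.tv_nsec = 5000000 % 1000000000LL;   -- 5,000,000 ns
 *
 * If nanosleep() completes, diff_clk is reset to 0; if it is interrupted
 * by a signal, only the portion actually slept is subtracted, so the
 * remainder is retried on the next call.
 */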

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}
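
/* Note on the return value above (an explanatory aside, not part of the
 * original file): tcg_qemu_tb_exec() returns a pointer to the TB that
 * was last executing, with the exit condition encoded in the two low
 * bits (TB_EXIT_MASK). Values 0 and 1 name the jump slot through which
 * the TB was left, TB_EXIT_ICOUNT_EXPIRED means the instruction budget
 * ran out before the TB started, and TB_EXIT_REQUESTED means something
 * external (such as a pending interrupt) asked us to stop. Callers
 * split the value like this:
 *
 *     TranslationBlock *last_tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
 *     int exit_reason = next_tb & TB_EXIT_MASK;
 */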

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
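
/* Aside (not part of the original file): cpu_exec_nocache() covers the
 * case in the main loop below where the remaining icount budget
 * (insns_left) is smaller than the number of instructions in the next
 * cached TB. A one-off TB capped at max_cycles instructions is
 * generated, executed, and immediately freed, so only the full-size
 * block stays in the translation cache.
 */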

static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}
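
/* Lookup summary (an explanatory aside, not part of the original file):
 * TBs are found through two levels of caching. tb_find_fast() first
 * probes cpu->tb_jmp_cache, a small direct-mapped table indexed by a
 * hash of the virtual PC; on a miss it falls back to tb_find_slow(),
 * which walks the physically-indexed hash chain and, if the block has
 * never been translated, generates it with tb_gen_code(). Indexing the
 * slow path by physical address keeps lookups correct when the guest
 * remaps the same code at different virtual addresses.
 */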

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

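    /* The alignment machinery above only engages when icount alignment
     * was requested on the command line; if my reading of this series is
     * right, that is the align suboption of -icount, e.g.:
     *
     *     qemu-system-x86_64 -icount shift=7,align=on ...
     *
     * Without align=on, icount_align_option stays false and both
     * init_delay_params() and align_clocks() return immediately.
     */
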
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                                          "Servicing virtual hardware INT=0x%02x\n",
                                          intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes cpu->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
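                        /* Aside (not part of the original file):
                         * icount_decr is a union. u16.low holds the
                         * remaining instruction budget, decremented by
                         * translated code as it executes guest
                         * instructions; u16.high doubles as an
                         * exit-request flag, so when it is set the
                         * combined 32-bit u32 reads back negative here
                         * even while budget remains, which routes
                         * execution into the else branch below.
                         */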
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}