/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

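/* Pace the vCPU against the host clock when -icount align is enabled:
 * convert the instructions executed since the last call into guest ns,
 * fold them into diff_clk, and if the guest has run more than
 * VM_CLOCK_ADVANCE ahead of real time, sleep the host thread until the
 * two clocks are roughly back in step.
 */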
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

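/* Rate-limited warning when the guest falls behind real time: print at
 * most every MAX_DELAY_PRINT_RATE ns and at most MAX_NB_PRINTS times,
 * and only when the delay has moved outside the previously reported
 * one-second bracket.
 */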
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

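/* Take the initial guest/host clock snapshot for this cpu_exec() call:
 * diff_clk starts as the current offset between QEMU_CLOCK_VIRTUAL and
 * real time, and last_cpu_icount records the starting instruction count
 * so align_clocks() can later measure progress incrementally.  max_delay
 * and max_advance record the extreme offsets observed.
 */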
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

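/* Abandon the current translation block and unwind straight back to the
 * sigsetjmp() point in cpu_exec(); the pending work (exception, interrupt,
 * exit request) is then handled at the top of the outer loop.
 */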
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

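/* The value returned by the generated code (and thus by cpu_tb_exec())
 * packs two things together: the pointer to the TB that was last executed,
 * with the low TB_EXIT_MASK bits replaced by the exit reason
 * (TB_EXIT_IDX0/IDX1 identify which chained jump the TB left through,
 * TB_EXIT_REQUESTED and TB_EXIT_ICOUNT_EXPIRED mark an early exit).
 * Callers mask those bits off before dereferencing the TB pointer.
 */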
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

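/* One-shot execution path used when only a bounded number of guest
 * instructions may still run (the icount budget is about to expire):
 * generate a temporary TB capped at max_cycles, run it once, then
 * invalidate and free it instead of keeping it cached.
 */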
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

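/* Slow-path TB lookup: hash the physical address of the guest PC and walk
 * the physical hash chain for a block matching pc, cs_base and flags
 * (checking the second page too for blocks that span a page boundary).
 * If nothing matches, translate a new block.  The result is promoted to
 * the head of its hash chain and into the per-CPU virtual-PC jump cache
 * that tb_find_fast() consults first.
 */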
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

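/* Fast-path TB lookup: a direct-mapped cache indexed by the virtual PC.
 * A hit still has to be validated against pc, cs_base and flags because
 * different CPU states can map to the same slot; any mismatch falls back
 * to tb_find_slow().
 */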
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

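/* Called when the main loop exits with EXCP_DEBUG.  If the exit was not
 * triggered by a watchpoint, clear any leftover BP_WATCHPOINT_HIT flags,
 * then hand control to the per-target debug exception hook.
 */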
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

/* main execution loop */

volatile sig_atomic_t exit_request;

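/* Top-level TCG execution loop for one vCPU.  The structure is:
 *
 *   outer for(;;)                      -- re-entered via siglongjmp()
 *     sigsetjmp()                      -- exception/exit unwind point
 *     handle a pending exception_index, or leave with a return code
 *     inner for(;;)
 *       convert pending interrupt_request bits into target exceptions
 *       look up (or translate) the TB for the current CPU state
 *       optionally chain it to the previously executed TB
 *       run it with cpu_tb_exec() and act on the exit reason
 *
 * The function returns an EXCP_* value (e.g. EXCP_INTERRUPT, EXCP_HLT,
 * EXCP_DEBUG) describing why execution stopped.
 */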
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#endif
    cc->cpu_exec_enter(cpu);
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

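            /* next_tb carries the TB we executed last, so that the inner
             * loop can patch a direct jump from it to the next TB
             * (tb_add_jump below).  It is reset to 0 whenever control flow
             * changed in a way that makes such chaining invalid.
             */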
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
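                    /* The per-target blocks below turn the remaining
                     * interrupt_request bits into target exceptions, either
                     * through the CPUClass do_interrupt hook or a
                     * target-specific helper.  Each one that changes the
                     * guest control flow also clears next_tb so that the
                     * previous TB is not chained to the new destination.
                     */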
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \
    defined(TARGET_UNICORE32) || defined(TARGET_TRICORE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_TRICORE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }

#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->daif & PSTATE_I)
                        && (!IS_M(env) || env->regs[15] < 0xfffffff0)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
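                        /* insns_left is what remains of the decrementer.
                         * If icount_extra still holds budget, refill the
                         * 16-bit decrementer (at most 0xffff at a time) and
                         * keep going; otherwise run the few remaining
                         * instructions with cpu_exec_nocache() and leave
                         * the loop with EXCP_INTERRUPT.
                         */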
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */


#if defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#endif
    cc->cpu_exec_exit(cpu);

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}