[mirror_qemu.git] / cpu-exec.c  (at commit "cpu-exec: Make debug_excp_handler a QOM CPU method")
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

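/* With -icount align enabled, track how far the guest virtual clock has run
 * ahead of real time and sleep the host when the lead exceeds
 * VM_CLOCK_ADVANCE, so the two clocks stay roughly in sync. */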
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

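/* Print a rate- and count-limited warning when the guest is running late,
 * i.e. real time has moved ahead of the guest virtual clock. */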
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

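/* Capture the initial guest/host clock difference and the current icount so
 * that align_clocks() can later account for drift incrementally; also
 * updates the max_delay/max_advance statistics. */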
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* Exit the current TB from a signal handler. The host registers are
 * restored in a state compatible with the CPU emulator.
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *)(next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

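/* Slow-path TB lookup: search the physical-PC hash table and, if no matching
 * translation exists, generate one; the result is also written back into the
 * virtual-PC jump cache used by tb_find_fast(). */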
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

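/* Fast-path TB lookup via the per-CPU virtual-PC jump cache; falls back to
 * tb_find_slow() on a miss or when cs_base/flags do not match. */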
static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

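/* EXCP_DEBUG handling: if no watchpoint is currently reported as hit, clear
 * any stale BP_WATCHPOINT_HIT flags, then invoke the CPUClass
 * debug_excp_handler hook. */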
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
#elif defined(TARGET_TRICORE)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || \
    defined(TARGET_UNICORE32) || defined(TARGET_TRICORE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_TRICORE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }

#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->daif & PSTATE_I)
                        && (!IS_M(env) || env->regs[15] < 0xfffffff0)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes cpu->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_TRICORE)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}