/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

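/* With -icount align, the guest's instruction-counter-derived virtual
 * clock is kept in step with the host's real-time clock. SyncClocks
 * records the current guest-ahead-of-host difference and the icount
 * value at the last synchronisation point; align_clocks() below sleeps
 * the CPU thread whenever the guest has run too far ahead.
 */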
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to run at most 3 ms ahead of the host; the
 * difference between the two clocks can therefore oscillate
 * around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

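/* Warn the user when the guest runs late. Messages are emitted at most
 * once per MAX_DELAY_PRINT_RATE ns and at most MAX_NB_PRINTS times in
 * total, and only when the delay moves outside the band around the
 * last reported threshold (THRESHOLD_REDUCE seconds of hysteresis).
 */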
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */

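/* Unwind to the sigsetjmp() in cpu_exec(). The non-zero siglongjmp()
 * argument makes that sigsetjmp() return 1, sending the main loop down
 * its exception-recovery path.
 */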
void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* Exit the current TB from a signal handler. The host registers are
 * restored in a state compatible with the CPU emulator.
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

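/* tcg_qemu_tb_exec() returns the TB that was executing, as a pointer
 * whose low TB_EXIT_MASK bits encode why execution stopped: values up
 * to TB_EXIT_IDX1 name the jump slot the TB exited through, larger
 * values mean the TB's code never ran (instruction budget exhausted,
 * or an exit was requested). Masking with ~TB_EXIT_MASK recovers the
 * TranslationBlock pointer.
 */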
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

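/* TB lookup is two-level: tb_find_fast() first probes the per-CPU
 * tb_jmp_cache, a direct-mapped table hashed on the virtual PC; on a
 * miss, tb_find_slow() walks the physically indexed tb_phys_hash
 * chain, matching on pc, cs_base, flags and the physical page(s) the
 * TB spans, and translates a fresh TB if nothing matches.
 */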
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    debug_excp_handler = handler;
}

static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}

/* main execution loop */

volatile sig_atomic_t exit_request;

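/* cpu_exec() runs translated guest code until something forces control
 * back to the caller and returns the reason as an EXCP_* value (e.g.
 * EXCP_INTERRUPT, EXCP_HLT, EXCP_DEBUG, EXCP_HALTED). A minimal caller
 * might look like the sketch below (the real vCPU loop lives in cpus.c
 * and handles more cases; handle_debug() is a hypothetical helper):
 *
 *     for (;;) {
 *         int r = cpu_exec(env);
 *         if (r == EXCP_DEBUG) {
 *             handle_debug();
 *         }
 *     }
 */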
int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
    CPUClass *cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures. */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
    env->reserve_addr = -1;
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

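            /* next_tb carries the TB-chaining state between iterations:
             * zero means "don't patch the previous TB", while a non-zero
             * value is the previous TB pointer with the exit-slot index
             * in its TB_EXIT_MASK bits.
             */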
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
#if defined(TARGET_I386)
#if !defined(CONFIG_USER_ONLY)
                    if (interrupt_request & CPU_INTERRUPT_POLL) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
                        apic_poll_irq(x86_cpu->apic_state);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(x86_cpu);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                          0);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter(x86_cpu);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
                                                          0);
                            cpu->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                                        CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
                                                          0);
                            intno = ldl_phys(cpu->as,
                                             env->vm_vmcb
                                             + offsetof(struct vmcb,
                                                        control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt_x86_hardirq(env, intno, 1);
                            cpu->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0) {
                            cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        }
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        cpu->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_OPENRISC)
                    {
                        int idx = -1;
                        if ((interrupt_request & CPU_INTERRUPT_HARD)
                            && (env->sr & SR_IEE)) {
                            idx = EXCP_INT;
                        }
                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
                            && (env->sr & SR_TEE)) {
                            idx = EXCP_TICK;
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                cpu->exception_index = env->interrupt_index;
                                cc->do_interrupt(cpu);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->daif & PSTATE_F)) {
                        cpu->exception_index = EXCP_FIQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->daif & PSTATE_I))) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        cpu->exception_index = UC32_EXCP_INTR;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    {
                        int idx = -1;
                        /* ??? This hard-codes the OSF/1 interrupt levels.  */
                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                        case 0 ... 3:
                            if (interrupt_request & CPU_INTERRUPT_HARD) {
                                idx = EXCP_DEV_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 4:
                            if (interrupt_request & CPU_INTERRUPT_TIMER) {
                                idx = EXCP_CLK_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 5:
                            if (interrupt_request & CPU_INTERRUPT_SMP) {
                                idx = EXCP_SMP_INTERRUPT;
                            }
                            /* FALLTHRU */
                        case 6:
                            if (interrupt_request & CPU_INTERRUPT_MCHK) {
                                idx = EXCP_MCHK;
                            }
                        }
                        if (idx >= 0) {
                            cpu->exception_index = idx;
                            env->error_code = 0;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        cpu->exception_index = EXCP_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI) {
                        unsigned int m_flag_archval;
                        if (env->pregs[PR_VR] < 32) {
                            m_flag_archval = M_FLAG_V10;
                        } else {
                            m_flag_archval = M_FLAG_V32;
                        }
                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
                            cpu->exception_index = EXCP_NMI;
                            cc->do_interrupt(cpu);
                            next_tb = 0;
                        }
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        cpu->exception_index = env->pending_vector;
                        do_interrupt_m68k_hardirq(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#elif defined(TARGET_XTENSA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        cpu->exception_index = EXC_IRQ;
                        cc->do_interrupt(cpu);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
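                /* Under tb_lock: look up (or translate) the next TB and,
                 * if the previous TB exited through one of its two jump
                 * slots, patch that slot to jump directly to the new TB
                 * so future executions bypass this lookup entirely.
                 */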
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes cpu->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
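                    /* The icount budget is split: the low 16 bits of
                     * icount_decr count down inside generated code,
                     * while icount_extra holds what does not fit in
                     * 16 bits; on expiry the decrementer is refilled
                     * from icount_extra in chunks of at most 0xffff
                     * instructions.
                     */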
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
#if !(defined(CONFIG_USER_ONLY) && \
      (defined(TARGET_M68K) || defined(TARGET_PPC) || defined(TARGET_S390X)))
            cc = CPU_GET_CLASS(cpu);
#endif
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
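            /* have_tb_lock is volatile, so its value survived the
             * siglongjmp(); if the exception was raised while we held
             * tb_lock (e.g. inside tb_find_fast()), release it now.
             */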
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
        | (env->df & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_MOXIE)
#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
#elif defined(TARGET_XTENSA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* fail safe : never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}