/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/log.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

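/* align_clocks() is called after each translated block when -icount align
 * is enabled: it converts the instructions executed since the last call
 * into nanoseconds, accumulates the guest/host clock difference in
 * sc->diff_clk, and puts the host thread to sleep whenever the guest has
 * run more than VM_CLOCK_ADVANCE ahead of real time.
 */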
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

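/* The value returned by tcg_qemu_tb_exec() (and propagated by
 * cpu_tb_exec()) packs two things: the low TB_EXIT_MASK bits hold the
 * exit condition (TB_EXIT_REQUESTED, TB_EXIT_ICOUNT_EXPIRED, or a taken
 * jump slot index), and the remaining bits are a pointer to the
 * TranslationBlock that was executing when we left the generated code.
 * The caller uses the pointer for TB chaining and the low bits to decide
 * what to do next.
 */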
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code.  An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

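/* TB lookup proceeds in three stages: tb_find_fast() first probes the
 * per-CPU tb_jmp_cache, indexed by virtual PC; on a miss, tb_find_slow()
 * calls tb_find_physical(), which walks the physically indexed hash
 * chain; and if that also fails, the block is translated from scratch
 * with tb_gen_code().
 */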
static TranslationBlock *tb_find_physical(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint64_t flags)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            return NULL;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    break;
                }
            } else {
                break;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }

    /* Move the TB to the head of the list */
    *ptb1 = tb->phys_hash_next;
    tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
    tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    return tb;
}

static TranslationBlock *tb_find_slow(CPUState *cpu,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb;

    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        goto found;
    }

#ifdef CONFIG_USER_ONLY
    /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
     * taken outside tb_lock.  Since we're momentarily dropping
     * tb_lock, there's a chance that our desired tb has been
     * translated.
     */
    tb_unlock();
    mmap_lock();
    tb_lock();
    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        mmap_unlock();
        goto found;
    }
#endif

    /* if no translated code is available, translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

#ifdef CONFIG_USER_ONLY
    mmap_unlock();
#endif

found:
    /* add the TB to the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

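/* A translation block is keyed by the (pc, cs_base, flags) triple returned
 * by cpu_get_tb_cpu_state(); a cached entry is reused only when all three
 * match, otherwise we fall back to tb_find_slow().
 */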
static inline TranslationBlock *tb_find_fast(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(cpu, pc, cs_base, flags);
    }
    return tb;
}

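/* Called when the guest stops with EXCP_DEBUG: clear any stale
 * BP_WATCHPOINT_HIT flags if the stop was not caused by a watchpoint,
 * then hand control to the per-target debug exception handler.
 */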
static void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

/* main execution loop */

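/* cpu_exec() is built around two nested loops guarded by sigsetjmp():
 * the outer loop is re-entered whenever cpu_loop_exit() longjmps back to
 * cpu->jmp_env (pending exceptions, interrupts, exit requests), while the
 * inner loop looks up, chains and executes translated blocks until one of
 * those events occurs.
 */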
int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUArchState *env = &x86_cpu->env;
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        }
#endif
        if (!cpu_has_work(cpu)) {
            current_cpu = NULL;
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    atomic_mb_set(&tcg_current_cpu, cpu);
    rcu_read_lock();

    if (unlikely(atomic_mb_read(&exit_request))) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(cpu);
                    }
                    cpu->exception_index = -1;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    cpu->exception_index = -1;
                    break;
#else
                    if (replay_exception()) {
                        cc->do_interrupt(cpu);
                        cpu->exception_index = -1;
                    } else if (!replay_has_interrupt()) {
                        /* give a chance to iothread in replay mode */
                        ret = EXCP_INTERRUPT;
                        break;
                    }
#endif
                }
            } else if (replay_has_exception()
                       && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
                /* try to cause an exception pending in the log */
                cpu_exec_nocache(cpu, 1, tb_find_fast(cpu), true);
                ret = -1;
                break;
            }

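            /* next_tb carries the result of the last cpu_tb_exec() call:
             * the previously executed TB plus its exit-condition bits.
             * tb_add_jump() uses it below to patch a direct jump from that
             * TB to the newly found one; forcing it to 0 disables chaining
             * for the next block.
             */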
            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
                    if (replay_mode == REPLAY_MODE_PLAY
                        && !replay_has_interrupt()) {
                        /* Do nothing */
                    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                        replay_interrupt();
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_I386)
                    else if (interrupt_request & CPU_INTERRUPT_INIT) {
                        replay_interrupt();
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    else if (interrupt_request & CPU_INTERRUPT_RESET) {
                        replay_interrupt();
                        cpu_reset(cpu);
                        cpu_loop_exit(cpu);
                    }
#endif
                    /* The target hook has 3 exit conditions:
                       false when the interrupt isn't processed,
                       true when it is, and we should restart on a new TB,
                       or it longjmps out via cpu_loop_exit().  */
                    else {
                        replay_interrupt();
                        if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                            next_tb = 0;
                        }
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request
                             || replay_has_interrupt())) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                tb_lock();
                tb = tb_find_fast(cpu);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1
                    && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                tb_unlock();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    cpu->current_tb = tb;
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    cpu->current_tb = NULL;
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.  But we need to
                         * ensure the tcg_exit_req read in generated code
                         * comes before the next read of cpu->exit_request
                         * or cpu->interrupt_request.
                         */
                        smp_rmb();
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
                        int insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            insns_left = MIN(0xffff, cpu->icount_extra);
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                                cpu_exec_nocache(cpu, insns_left, tb, false);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
            /* Some compilers wrongly smash all local variables after
             * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
             * Reload essential local variables here for those compilers.
             * Newer versions of gcc would complain about this code (-Wclobbered). */
            cpu = current_cpu;
            cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
            env = &x86_cpu->env;
#endif
#else /* buggy compiler */
            /* Assert that the compiler does not smash local variables. */
            g_assert(cpu == current_cpu);
            g_assert(cc == CPU_GET_CLASS(cpu));
#ifdef TARGET_I386
            g_assert(x86_cpu == X86_CPU(cpu));
            g_assert(env == &x86_cpu->env);
#endif
#endif /* buggy compiler */
            cpu->can_do_io = 1;
            tb_lock_reset();
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;

    /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
    atomic_set(&tcg_current_cpu, NULL);
    return ret;
}