/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace-root.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;
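/* diff_clk accumulates how far the guest's instruction-count clock has
 * run ahead of the host's realtime clock; last_cpu_icount remembers the
 * icount value seen at the previous alignment so that only the delta is
 * converted to nanoseconds on each call to align_clocks(). */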

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the two clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

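/* Rate-limited warning when the guest is running behind the host: at
 * most one message every MAX_DELAY_PRINT_RATE ns (2s) and MAX_NB_PRINTS
 * messages in total, re-armed only when the delay moves outside the
 * [threshold_delay - THRESHOLD_REDUCE, threshold_delay] band. */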
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */

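/* tcg_qemu_tb_exec() returns a value that packs the address of the last
 * TB into its high bits and a TB_EXIT_* code into its low TB_EXIT_MASK
 * bits: TB_EXIT_IDX0/IDX1 name the goto_tb jump slot that was taken,
 * while TB_EXIT_REQUESTED and TB_EXIT_ICOUNT_EXPIRED mark exits taken
 * before the returned TB started executing. */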
/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc_ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
                           itb->tc_ptr, cpu->cpu_index, itb->pc,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#else
        log_cpu_state(cpu, 0);
#endif
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc_ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    if (tb_exit == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because there is
         * a pending interrupt). We've now stopped, so clear the flag.
         */
        atomic_set(&cpu->tcg_exit_req, 0);
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = orig_tb;
    tb_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
    tb_unlock();
}
#endif

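/* Translate exactly one guest instruction as a throw-away TB
 * (1 | CF_NOCACHE) and run it; the TB is invalidated and freed as soon
 * as it has executed. Helper for cpu_exec_step_atomic() below. */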
static void cpu_exec_step(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb_lock();
    tb = tb_gen_code(cpu, pc, cs_base, flags,
                     1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
    tb->orig_tb = NULL;
    tb_unlock();
    /* execute the generated code */
    trace_exec_tb_nocache(tb, pc);
    cpu_tb_exec(cpu, tb);
    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
    tb_unlock();
}

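/* With thread-per-vCPU (MTTCG) execution, atomic guest instructions are
 * emulated by single-stepping them inside an exclusive section: all
 * other vCPU threads are stopped, and parallel_cpus is cleared so that
 * the one-off translation does not need to be multi-thread safe. */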
void cpu_exec_step_atomic(CPUState *cpu)
{
    start_exclusive();

    /* Since we got here, we know that parallel_cpus must be true.  */
    parallel_cpus = false;
    cpu_exec_step(cpu);
    parallel_cpus = true;

    end_exclusive();
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
};

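/* tb_desc carries the lookup key for the global TB hash table (a qht
 * keyed by physical PC); tb_cmp() is the qht comparison callback. A
 * candidate TB matches only if pc, first physical page, cs_base and
 * flags all agree, it has not been invalidated, and, for a TB that
 * spans two pages, the second page still maps to the same physical
 * address. */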
static bool tb_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        !atomic_read(&tb->invalid)) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

static TranslationBlock *tb_htable_lookup(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags);
    return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}

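/* tb_find() resolves the current guest state to a TB in three steps:
 * first a direct-mapped per-CPU cache indexed by virtual PC
 * (cpu->tb_jmp_cache), then the global physical-PC hash table, and
 * finally tb_gen_code() if no translation exists yet. When a valid
 * last_tb is passed in, the two TBs are also chained together with
 * tb_add_jump() so future transitions skip this lookup entirely. */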
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    bool have_tb_lock = false;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_htable_lookup(cpu, pc, cs_base, flags);
        if (!tb) {

            /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
             * taken outside tb_lock. As system emulation is currently
             * single threaded the locks are NOPs.
             */
            mmap_lock();
            tb_lock();
            have_tb_lock = true;

            /* There's a chance that our desired tb has been translated while
             * taking the locks so we check again inside the lock.
             */
            tb = tb_htable_lookup(cpu, pc, cs_base, flags);
            if (!tb) {
                /* if no translated code available, then translate it now */
                tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
            }

            mmap_unlock();
        }

        /* We add the TB to the virtual-PC hash table for fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        if (!have_tb_lock) {
            tb_lock();
            have_tb_lock = true;
        }
        if (!tb->invalid) {
            tb_add_jump(last_tb, tb_exit, tb);
        }
    }
    if (have_tb_lock) {
        tb_unlock();
    }
    return tb;
}

static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

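/* Returns true when the main loop in cpu_exec() should stop and hand
 * *ret back to the caller; returns false when execution can continue
 * with the next TB. Exception indices at or above EXCP_INTERRUPT are
 * loop-control codes rather than guest exceptions. */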
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index >= 0) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            /* exit request from the cpu execution loop */
            *ret = cpu->exception_index;
            if (*ret == EXCP_DEBUG) {
                cpu_handle_debug_exception(cpu);
            }
            cpu->exception_index = -1;
            return true;
        } else {
#if defined(CONFIG_USER_ONLY)
            /* In user-mode emulation we raise a fake exception that is
               handled outside the cpu execution loop. */
#if defined(TARGET_I386)
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->do_interrupt(cpu);
#endif
            *ret = cpu->exception_index;
            cpu->exception_index = -1;
            return true;
#else
            if (replay_exception()) {
                CPUClass *cc = CPU_GET_CLASS(cpu);
                qemu_mutex_lock_iothread();
                cc->do_interrupt(cpu);
                qemu_mutex_unlock_iothread();
                cpu->exception_index = -1;
            } else if (!replay_has_interrupt()) {
                /* give a chance to iothread in replay mode */
                *ret = EXCP_INTERRUPT;
                return true;
            }
#endif
        }
#ifndef CONFIG_USER_ONLY
    } else if (replay_has_exception()
               && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
        /* try to cause an exception pending in the log */
        cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
        *ret = -1;
        return true;
#endif
    }

    return false;
}

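/* Service any pending interrupt sources. cpu->interrupt_request is
 * read outside the BQL with an atomic access; the BQL (iothread mutex)
 * is taken only while the request is actually examined and serviced,
 * and is dropped again before returning or longjmp'ing out via
 * cpu_loop_exit(). Returns true when an exception has been raised and
 * the caller should go back to cpu_handle_exception(). */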
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has three exit conditions: it returns false
           when the interrupt isn't processed, true when it is and we
           should restart on a new TB, or it may leave the loop entirely
           via longjmp through cpu_loop_exit(). */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) {
        atomic_set(&cpu->exit_request, 0);
        cpu->exception_index = EXCP_INTERRUPT;
        return true;
    }

    return false;
}

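/* Run one TB and classify how it exited. For icount, the instruction
 * budget lives in two pieces: icount_decr.u16.low is the 16-bit
 * decrementer consumed by generated code, and icount_extra holds the
 * rest; when the decrementer expires it is refilled from icount_extra
 * in chunks of at most 0xffff instructions, and any final remainder is
 * executed through cpu_exec_nocache(). */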
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit,
                                    SyncClocks *sc)
{
    uintptr_t ret;

    if (unlikely(atomic_read(&cpu->exit_request))) {
        return;
    }

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    switch (*tb_exit) {
    case TB_EXIT_REQUESTED:
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (e.g. interrupt_request)
         * which we will handle next time around the loop. But we
         * need to ensure the tcg_exit_req read in generated code
         * comes before the next read of cpu->exit_request or
         * cpu->interrupt_request.
         */
        smp_mb();
        *last_tb = NULL;
        break;
    case TB_EXIT_ICOUNT_EXPIRED:
    {
        /* Instruction counter expired.  */
#ifdef CONFIG_USER_ONLY
        abort();
#else
        int insns_left = cpu->icount_decr.u32;
        *last_tb = NULL;
        if (cpu->icount_extra && insns_left >= 0) {
            /* Refill decrementer and continue execution.  */
            cpu->icount_extra += insns_left;
            insns_left = MIN(0xffff, cpu->icount_extra);
            cpu->icount_extra -= insns_left;
            cpu->icount_decr.u16.low = insns_left;
        } else {
            if (insns_left > 0) {
                /* Execute remaining instructions.  */
                cpu_exec_nocache(cpu, insns_left, tb, false);
                align_clocks(sc, cpu);
            }
            cpu->exception_index = EXCP_INTERRUPT;
            cpu_loop_exit(cpu);
        }
        break;
#endif
    }
    default:
        *last_tb = tb;
        break;
    }
}

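/* cpu_exec() is structured as two nested loops plus a sigsetjmp()
 * landing pad: the outer loop runs until cpu_handle_exception() asks to
 * leave, the inner loop finds and executes TBs until
 * cpu_handle_interrupt() raises an exception, and helpers that need to
 * abandon generated code siglongjmp() back here via cpu_loop_exit(). */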
/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc;

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
        cpu->can_do_io = 1;
        tb_lock_reset();
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb = tb_find(cpu, last_tb, tb_exit);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
            /* Try to align the host and virtual clocks
               if the guest is ahead */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}