accel/tcg/cpu-exec.c
/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "exec/replay-core.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto-common.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal-common.h"
#include "internal-target.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

int64_t max_delay;
int64_t max_advance;

static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
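
/*
 * Editor's note -- a worked example of the sleep above (numbers are
 * illustrative, not from the source): if the guest has run 4.5e9 ns
 * ahead of the host, sc->diff_clk == 4500000000, so we request
 *
 *     sleep_delay.tv_sec  = 4500000000 / 1000000000 = 4
 *     sleep_delay.tv_nsec = 4500000000 % 1000000000 = 500000000
 *
 * i.e. a 4.5 s nanosleep().  If a signal interrupts the sleep early,
 * rem_delay holds the unslept remainder and diff_clk is rebuilt from
 * it, so the debt carries over to the next call.
 */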

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print at most every 2s if the guest is late.  We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */
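
/*
 * Editor's note: icount_align_option is driven by the -icount command
 * line, e.g. "-icount shift=auto,align=on".  With align=off (the
 * default) align_clocks() and init_delay_params() return immediately;
 * user-mode builds compile the empty stubs above instead.
 */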

uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step.  We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (qatomic_read(&one_insn_per_tb)) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}
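
/*
 * Editor's note -- how to read the "| 1" above (a sketch, based on the
 * CF_COUNT_MASK definition in QEMU's translation headers): the low bits
 * of cflags hold the maximum number of guest instructions per TB, so
 *
 *     cflags = (cflags & ~CF_COUNT_MASK) | 1;   // one insn per TB
 *
 * which is why single-step and one_insn_per_tb both OR in a literal 1.
 */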

struct tb_desc {
    vaddr pc;
    uint64_t cs_base;
    CPUArchState *env;
    tb_page_addr_t page_addr0;
    uint32_t flags;
    uint32_t cflags;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
        tb_page_addr0(tb) == desc->page_addr0 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
        if (tb_phys_page1 == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page1;
            vaddr virt_page1;

            /*
             * We know that the first page matched, and an otherwise valid TB
             * encountered an incomplete instruction at the end of that page,
             * therefore we know that generating a new TB from the current PC
             * must also require reading from the next page -- even if the
             * second pages do not match, and therefore the resulting insn
             * is different for the new TB.  Therefore any exception raised
             * here by the faulting lookup is not premature.
             */
            virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
            phys_page1 = get_page_addr_code(desc->env, virt_page1);
            if (tb_phys_page1 == phys_page1) {
                return true;
            }
        }
    }
    return false;
}

static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = cpu_env(cpu);
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;
    h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
                     flags, cs_base, cflags);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    TranslationBlock *tb;
    CPUJumpCache *jc;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

    hash = tb_jmp_cache_hash_func(pc);
    jc = cpu->tb_jmp_cache;

    tb = qatomic_read(&jc->array[hash].tb);
    if (likely(tb &&
               jc->array[hash].pc == pc &&
               tb->cs_base == cs_base &&
               tb->flags == flags &&
               tb_cflags(tb) == cflags)) {
        goto hit;
    }

    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return NULL;
    }

    jc->array[hash].pc = pc;
    qatomic_set(&jc->array[hash].tb, tb);

hit:
    /*
     * As long as tb is not NULL, the contents are consistent.  Therefore,
     * the virtual PC has to match for non-CF_PCREL translations.
     */
    assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
    return tb;
}
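
/*
 * Editor's note -- the lookup above is two-level (a restating sketch,
 * not new behaviour):
 *
 *     tb = jc->array[hash(pc)]      -- per-CPU jump cache, lock-free
 *     if miss: qht lookup           -- global hash table of all TBs
 *     if miss: return NULL          -- caller falls back to tb_gen_code()
 *
 * Only a qht hit refills the jump cache, so the common hot path stays
 * within the per-CPU array and touches no shared state.
 */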

static void log_cpu_exec(vaddr pc, CPUState *cpu,
                         const TranslationBlock *tb)
{
    if (qemu_log_in_addr_range(pc)) {
        qemu_log_mask(CPU_LOG_EXEC,
                      "Trace %d: %p [%08" PRIx64
                      "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                      tb->flags, tb->cflags, lookup_symbol(pc));

        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                int flags = 0;

                if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
                    flags |= CPU_DUMP_FPU;
                }
#if defined(TARGET_I386)
                flags |= CPU_DUMP_CCOP;
#endif
                if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
                    flags |= CPU_DUMP_VPU;
                }
                cpu_dump_state(cpu, logfile, flags);
                qemu_log_unlock(logfile);
            }
        }
    }
}

static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
                                       uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                CPUClass *cc = CPU_GET_CLASS(cpu);
                assert(cc->tcg_ops->debug_check_breakpoint);
                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page.  Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (match_page) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
    }
    return false;
}

static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
                                         uint32_t *cflags)
{
    return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
        check_for_breakpoints_slow(cpu, pc, cflags);
}

/**
 * helper_lookup_tb_ptr: quick check for next tb
 * @env: current cpu state
 *
 * Look for an existing TB matching the current cpu state.
 * If found, return the code pointer.  If not found, return
 * the tcg epilogue so that we return into cpu_tb_exec.
 */
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cflags = curr_cflags(cpu);
    if (check_for_breakpoints(cpu, pc, &cflags)) {
        cpu_loop_exit(cpu);
    }

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return tcg_code_gen_epilogue;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(pc, cpu, tb);
    }

    return tb->tc.ptr;
}
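
/*
 * Editor's note -- a rough sketch of how generated code consumes this
 * helper (the real call sequence is emitted by the TCG backend; the
 * shape below is illustrative):
 *
 *     ptr = helper_lookup_tb_ptr(env);
 *     goto *ptr;    // either the next TB, or tcg_code_gen_epilogue
 *
 * so a lookup miss "falls out" of translated code through the epilogue
 * and lands back in cpu_tb_exec() without a separate exit path.
 */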

/* Execute a TB, and fix up the CPU state afterwards if necessary */

/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call.  Since such a function did not exist at compile
 * time, the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU, so this does
 * not reduce the value of CFI in environments with high security
 * requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu_env(cpu);
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(log_pc(cpu, itb), cpu, itb);
    }

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->neg.can_do_io = true;
    qemu_plugin_disable_mem_helpers(cpu);
    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /*
         * We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->tcg_ops->synchronize_from_tb) {
            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
        if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
            vaddr pc = log_pc(cpu, last_tb);
            if (qemu_log_in_addr_range(pc)) {
                qemu_log("Stopped execution of TB chain before %p [%016"
                         VADDR_PRIx "] %s\n",
                         last_tb->tc.ptr, pc, lookup_symbol(pc));
            }
        }
    }

    /*
     * If gdb single-step, and we haven't raised another exception,
     * raise a debug exception.  Single-step with another exception
     * is handled in cpu_handle_exception.
     */
    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

    return last_tb;
}
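
/*
 * Editor's note -- the tcg_qemu_tb_exec() return value decoded above
 * packs two things into one uintptr_t (constants from QEMU's TCG
 * headers):
 *
 *     ret & ~TB_EXIT_MASK  -> the TB we were in when we left
 *     ret &  TB_EXIT_MASK  -> why we left:
 *         TB_EXIT_IDX0/IDX1   taken goto_tb slot 0/1 (chaining possible)
 *         TB_EXIT_REQUESTED   an asynchronous exit was requested
 *
 * hence the "*tb_exit > TB_EXIT_IDX1" test for "we left for a reason
 * other than a normal goto_tb exit".
 */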

static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_enter) {
        cc->tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_exit) {
        cc->tcg_ops->cpu_exec_exit(cpu);
    }
}

static void cpu_exec_longjmp_cleanup(CPUState *cpu)
{
    /* Non-buggy compilers preserve this; assert the correct value. */
    g_assert(cpu == current_cpu);

#ifdef CONFIG_USER_ONLY
    clear_helper_retaddr();
    if (have_mmap_lock()) {
        mmap_unlock();
    }
#else
    /*
     * For softmmu, a tlb_fill fault during translation will land here,
     * and we need to release any page locks held.  In system mode we
     * have one tcg_ctx per thread, so we know it was this cpu doing
     * the translation.
     *
     * Alternative 1: Install a cleanup to be called via an exception
     * handling safe longjmp.  It seems plausible that all our hosts
     * support such a thing.  We'd have to properly register unwind info
     * for the JIT for EH, rather than just for GDB.
     *
     * Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
     * capture the cpu_loop_exit longjmp, perform the cleanup, and
     * jump again to arrive here.
     */
    if (tcg_ctx->gen_tb) {
        tb_unlock_pages(tcg_ctx->gen_tb);
        tcg_ctx->gen_tb = NULL;
    }
#endif
    if (bql_locked()) {
        bql_unlock();
    }
    assert_no_pages_locked();
}

void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
        cpu_exec_longjmp_cleanup(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    /*
     * Get the rx view of the structure, from which we find the
     * executable code address, and tb_target_set_jmp_target can
     * produce a pc-relative displacement to jmp_target_addr[n].
     */
    const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
    uintptr_t offset = tb->jmp_insn_offset[n];
    uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
    uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;

    tb->jmp_target_addr[n] = addr;
    tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
}

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
                  tb->tc.ptr, n, tb_next->tc.ptr);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}
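
/*
 * Editor's note -- the linking protocol above, in sketch form (a
 * restatement, not new behaviour): jmp_dest[n] is the forward edge and
 * tb_next->jmp_list_head chains the reverse edges, with the slot index
 * n packed into the low bits of the pointer:
 *
 *     tb ---goto_tb slot n---> tb_next
 *     tb_next->jmp_list_head = (uintptr_t)tb | n;
 *
 * That reverse list is what TB invalidation walks later to unlink
 * every caller when tb_next dies.
 */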

static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (cpu->halted) {
#if defined(TARGET_I386)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            bql_lock();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            bql_unlock();
        }
#endif /* TARGET_I386 */
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }
#endif /* !CONFIG_USER_ONLY */

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->tcg_ops->debug_excp_handler) {
        cc->tcg_ops->debug_excp_handler(cpu);
    }
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                                  | CF_NOIRQ | 1;
        }
#endif
        return false;
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /*
         * In user mode only, we simulate a fake exception which will
         * be handled outside the cpu execution loop.
         */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            bql_lock();
            cc->tcg_ops->do_interrupt(cpu);
            bql_unlock();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

#ifndef CONFIG_USER_ONLY
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later.  It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here.  Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /*
     * Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        bql_lock();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            bql_unlock();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            bql_unlock();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            bql_unlock();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            bql_unlock();
            return true;
        }
#endif /* !TARGET_I386 */
        /*
         * The target hook has three exit conditions: false when the
         * interrupt isn't processed, true when it is and we should
         * restart on a new TB, or it leaves via longjmp through
         * cpu_loop_exit.
         */
        else {
            CPUClass *cc = CPU_GET_CLASS(cpu);

            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    bql_unlock();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /*
             * The target hook may have updated 'cpu->interrupt_request';
             * reload the 'interrupt_request' value.
             */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /*
             * Ensure that no TB jump will be modified as
             * the program flow was changed.
             */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        bql_unlock();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    vaddr pc, TranslationBlock **last_tb,
                                    int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu->neg.icount_decr.u32);
    if (insns_left < 0) {
        /*
         * Something asked us to stop executing chained TBs; just
         * continue round the main loop.  Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->neg.icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next tb has more instructions than we have left to
     * execute we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (insns_left > 0 && insns_left < tb->icount) {
        assert(insns_left <= CF_COUNT_MASK);
        assert(cpu->icount_extra == 0);
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}
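
/*
 * Editor's note -- a worked refill example (numbers illustrative): with
 * an icount budget of 100000 instructions,
 *
 *     insns_left          = MIN(0xffff, 100000) = 65535
 *     icount_decr.u16.low = 65535
 *     icount_extra        = 100000 - 65535 = 34465
 *
 * The 16-bit decrementer counts down inside generated code; the
 * overflow budget sits in icount_extra until the next refill.
 */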

/* main execution loop */

static int __attribute__((noinline))
cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
{
    int ret;

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb;
            vaddr pc;
            uint64_t cs_base;
            uint32_t flags, cflags;

            cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);

            /*
             * When requested, use an exact setting for cflags for the next
             * execution.  This is used for icount, precise smc, and stop-
             * after-access watchpoints.  Since this request should never
             * have CF_INVALID set, -1 is a convenient invalid value that
             * does not require tcg headers for cpu_common_reset.
             */
            cflags = cpu->cflags_next_tb;
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                cpu->cflags_next_tb = -1;
            }

            if (check_for_breakpoints(cpu, pc, &cflags)) {
                break;
            }

            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
            if (tb == NULL) {
                CPUJumpCache *jc;
                uint32_t h;

                mmap_lock();
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
                mmap_unlock();

                /*
                 * We add the TB in the virtual pc hash table
                 * for the fast lookup
                 */
                h = tb_jmp_cache_hash_func(pc);
                jc = cpu->tb_jmp_cache;
                jc->array[h].pc = pc;
                qatomic_set(&jc->array[h].tb, tb);
            }

#ifndef CONFIG_USER_ONLY
            /*
             * We don't take care of direct jumps when address mapping
             * changes in system emulation.  So it's not safe to make a
             * direct jump to a TB spanning two pages because the mapping
             * for the second page can change.
             */
            if (tb_page_addr1(tb) != -1) {
                last_tb = NULL;
            }
#endif
            /* See if we can patch the calling TB. */
            if (last_tb) {
                tb_add_jump(last_tb, tb_exit, tb);
            }

            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);

            /*
             * Try to align the host and virtual clocks
             * if the guest is in advance.
             */
            align_clocks(sc, cpu);
        }
    }
    return ret;
}

static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
{
    /* Prepare setjmp context for exception handling. */
    if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
        cpu_exec_longjmp_cleanup(cpu);
    }

    return cpu_exec_loop(cpu, sc);
}

int cpu_exec(CPUState *cpu)
{
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();
    cpu_exec_enter(cpu);

    /*
     * Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0.  As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    ret = cpu_exec_setjmp(cpu, &sc);

    cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}
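
/*
 * Editor's note -- a minimal sketch of the call pattern around
 * cpu_exec() (an assumed caller shape, not code from this file): the
 * per-vCPU TCG thread loops roughly as
 *
 *     while (vcpu should run) {
 *         int r = cpu_exec(cpu);   // returns EXCP_HALTED,
 *                                  // EXCP_INTERRUPT, EXCP_DEBUG, ...
 *         handle r, or wait for work;
 *     }
 *
 * with the BQL dropped around the cpu_exec() call itself.
 */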

bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
    static bool tcg_target_initialized;
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!tcg_target_initialized) {
        cc->tcg_ops->initialize();
        tcg_target_initialized = true;
    }

    cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
    tlb_init(cpu);
#ifndef CONFIG_USER_ONLY
    tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
    /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */

    return true;
}

/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */

    tlb_destroy(cpu);
    g_free_rcu(cpu->tb_jmp_cache, rcu);
}