/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "exec/replay-core.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

int64_t max_delay;
int64_t max_advance;
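
/*
 * With -icount align, diff_clk tracks how far the guest's virtual
 * clock has run ahead of the host's real-time clock.  align_clocks()
 * converts the insns executed since the last call into nanoseconds
 * and, once the guest is more than VM_CLOCK_ADVANCE ahead, sleeps to
 * let the host catch up.
 */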
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print at most every 2s if the guest is late.  We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */

uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step.  We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
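    /*
     * The low CF_COUNT_MASK bits of cflags bound the number of guest
     * insns per TB, so or-ing in 1 below restricts the next TB to a
     * single instruction.
     */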
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (qatomic_read(&one_insn_per_tb)) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t page_addr0;
    uint32_t flags;
    uint32_t cflags;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
        tb_page_addr0(tb) == desc->page_addr0 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
        if (tb_phys_page1 == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page1;
            target_ulong virt_page1;

            /*
             * We know that the first page matched, and an otherwise valid TB
             * encountered an incomplete instruction at the end of that page,
             * therefore we know that generating a new TB from the current PC
             * must also require reading from the next page -- even if the
             * second pages do not match, and therefore the resulting insn
             * is different for the new TB.  Therefore any exception raised
             * here by the faulting lookup is not premature.
             */
            virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
            phys_page1 = get_page_addr_code(desc->env, virt_page1);
            if (tb_phys_page1 == phys_page1) {
                return true;
            }
        }
    }
    return false;
}
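
/*
 * The global hash table (tb_ctx.htable) is keyed on physical PC,
 * flags, cs_base and cflags.  With CF_PCREL the virtual PC contributes
 * 0 to the hash, matching tb_lookup_cmp above, which skips the tb->pc
 * comparison for such TBs.
 */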
static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;
    h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
                     flags, cs_base, cflags);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}
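
/*
 * tb_lookup layers two caches: the per-CPU jump cache (jc->array,
 * indexed by a hash of the virtual PC) is probed first, and a miss
 * falls back to the global physical hash table via tb_htable_lookup,
 * refilling the jump cache entry on success.
 */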
/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags, uint32_t cflags)
{
    TranslationBlock *tb;
    CPUJumpCache *jc;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

    hash = tb_jmp_cache_hash_func(pc);
    jc = cpu->tb_jmp_cache;

    if (cflags & CF_PCREL) {
        /* Use acquire to ensure current load of pc from jc. */
        tb = qatomic_load_acquire(&jc->array[hash].tb);

        if (likely(tb &&
                   jc->array[hash].pc == pc &&
                   tb->cs_base == cs_base &&
                   tb->flags == flags &&
                   tb_cflags(tb) == cflags)) {
            return tb;
        }
        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            return NULL;
        }
        jc->array[hash].pc = pc;
        /* Ensure pc is written first. */
        qatomic_store_release(&jc->array[hash].tb, tb);
    } else {
        /* Use rcu_read to ensure current load of pc from *tb. */
        tb = qatomic_rcu_read(&jc->array[hash].tb);

        if (likely(tb &&
                   tb->pc == pc &&
                   tb->cs_base == cs_base &&
                   tb->flags == flags &&
                   tb_cflags(tb) == cflags)) {
            return tb;
        }
        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            return NULL;
        }
        /* Use the pc value already stored in tb->pc. */
        qatomic_set(&jc->array[hash].tb, tb);
    }

    return tb;
}

static void log_cpu_exec(target_ulong pc, CPUState *cpu,
                         const TranslationBlock *tb)
{
    if (qemu_log_in_addr_range(pc)) {
        qemu_log_mask(CPU_LOG_EXEC,
                      "Trace %d: %p [%08" PRIx64
                      "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                      tb->flags, tb->cflags, lookup_symbol(pc));

        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                int flags = 0;

                if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
                    flags |= CPU_DUMP_FPU;
                }
#if defined(TARGET_I386)
                flags |= CPU_DUMP_CCOP;
#endif
                cpu_dump_state(cpu, logfile, flags);
                qemu_log_unlock(logfile);
            }
        }
    }
}

static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
                                       uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                CPUClass *cc = CPU_GET_CLASS(cpu);
                assert(cc->tcg_ops->debug_check_breakpoint);
                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page.  Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (match_page) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
    }
    return false;
}
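
/*
 * The wrapper below keeps the common case cheap: the slow scan above
 * runs only when the breakpoint list is non-empty, so a TB lookup with
 * no breakpoints set costs a single queue-empty check.
 */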
static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
                                         uint32_t *cflags)
{
    return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
        check_for_breakpoints_slow(cpu, pc, cflags);
}

/**
 * helper_lookup_tb_ptr: quick check for next tb
 * @env: current cpu state
 *
 * Look for an existing TB matching the current cpu state.
 * If found, return the code pointer.  If not found, return
 * the tcg epilogue so that we return into cpu_tb_exec.
 */
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags, cflags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cflags = curr_cflags(cpu);
    if (check_for_breakpoints(cpu, pc, &cflags)) {
        cpu_loop_exit(cpu);
    }

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return tcg_code_gen_epilogue;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(pc, cpu, tb);
    }

    return tb->tc.ptr;
}

/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call.  Since such a function did not exist at compile
 * time, the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU, so this does not
 * affect the impact of CFI in environments with high security requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(log_pc(cpu, itb), cpu, itb);
    }

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    qemu_plugin_disable_mem_helpers(cpu);
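    /*
     * tcg_qemu_tb_exec() returns the last executed TB's pointer with
     * the exit reason packed into its low TB_EXIT_MASK bits; both
     * pieces are unpacked just below.
     */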
    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->tcg_ops->synchronize_from_tb) {
            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
        if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
            target_ulong pc = log_pc(cpu, last_tb);
            if (qemu_log_in_addr_range(pc)) {
                qemu_log("Stopped execution of TB chain before %p ["
                         TARGET_FMT_lx "] %s\n",
                         last_tb->tc.ptr, pc, lookup_symbol(pc));
            }
        }
    }

    /*
     * If gdb single-step, and we haven't raised another exception,
     * raise a debug exception.  Single-step with another exception
     * is handled in cpu_handle_exception.
     */
    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

    return last_tb;
}

static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_enter) {
        cc->tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_exit) {
        cc->tcg_ops->cpu_exec_exit(cpu);
    }
}
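
/*
 * cpu_exec_step_atomic executes a single guest instruction with all
 * other vCPUs halted (start_exclusive/end_exclusive): CF_PARALLEL is
 * cleared and the TB is capped at one insn, which is how an atomic
 * operation that cannot be emulated in parallel is run serially.
 */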
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
#ifndef CONFIG_SOFTMMU
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    /*
     * Get the rx view of the structure, from which we find the
     * executable code address, and tb_target_set_jmp_target can
     * produce a pc-relative displacement to jmp_target_addr[n].
     */
    const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
    uintptr_t offset = tb->jmp_insn_offset[n];
    uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
    uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;

    tb->jmp_target_addr[n] = addr;
    tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
}
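
/*
 * tb_add_jump patches direct-jump slot n of 'tb' to branch straight to
 * 'tb_next'.  tb_next->jmp_lock is held across both the CF_INVALID
 * check and the insertion into tb_next's incoming-jump list, so that
 * invalidating tb_next can safely unlink and reset this jump later.
 */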
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
                  tb->tc.ptr, n, tb_next->tc.ptr);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (cpu->halted) {
#if defined(TARGET_I386)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif /* TARGET_I386 */
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }
#endif /* !CONFIG_USER_ONLY */

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->tcg_ops->debug_excp_handler) {
        cc->tcg_ops->debug_excp_handler(cpu);
    }
}

static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                | CF_NOIRQ | 1;
        }
#endif
        return false;
    }
    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->tcg_ops->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

#ifndef CONFIG_USER_ONLY
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later.  It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here.  Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
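    /*
     * cpu_exit() stores -1 into icount_decr.u16.high, which makes the
     * combined icount_decr.u32 negative so that every TB's prologue
     * check bails out; writing 0 here re-arms that exit mechanism.
     */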
    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif /* !TARGET_I386 */
        /*
         * The target hook has 3 exit conditions:
         * - false, when the interrupt isn't processed;
         * - true, when it is, and we should restart on a new TB;
         * - a longjmp through cpu_loop_exit.
         */
        else {
            CPUClass *cc = CPU_GET_CLASS(cpu);

            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    qemu_mutex_unlock_iothread();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    target_ulong pc,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop.  Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired. */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution. */
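    /*
     * Only the low 16 bits of the remaining budget fit in the in-TB
     * decrementer (icount_decr.u16.low); any excess beyond 0xffff
     * insns is parked in cpu->icount_extra and fed back in on later
     * refills.
     */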
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next tb has more instructions than we have left to
     * execute we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (insns_left > 0 && insns_left < tb->icount) {
        assert(insns_left <= CF_COUNT_MASK);
        assert(cpu->icount_extra == 0);
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}

/* main execution loop */
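
/*
 * The outer loop delivers any pending exception via
 * cpu_handle_exception(); the inner loop looks up (or translates) the
 * next TB, chains it to its predecessor when possible, and runs it,
 * until an interrupt or exit request breaks out through
 * cpu_handle_interrupt().
 */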
static int __attribute__((noinline))
cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
{
    int ret;

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb;
            target_ulong cs_base, pc;
            uint32_t flags, cflags;

            cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);

            /*
             * When requested, use an exact setting for cflags for the next
             * execution.  This is used for icount, precise smc, and stop-
             * after-access watchpoints.  Since this request should never
             * have CF_INVALID set, -1 is a convenient invalid value that
             * does not require tcg headers for cpu_common_reset.
             */
            cflags = cpu->cflags_next_tb;
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                cpu->cflags_next_tb = -1;
            }

            if (check_for_breakpoints(cpu, pc, &cflags)) {
                break;
            }

            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
            if (tb == NULL) {
                CPUJumpCache *jc;
                uint32_t h;

                mmap_lock();
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
                mmap_unlock();

                /*
                 * We add the TB in the virtual pc hash table
                 * for the fast lookup
                 */
                h = tb_jmp_cache_hash_func(pc);
                jc = cpu->tb_jmp_cache;
                if (cflags & CF_PCREL) {
                    jc->array[h].pc = pc;
                    /* Ensure pc is written first. */
                    qatomic_store_release(&jc->array[h].tb, tb);
                } else {
                    /* Use the pc value already stored in tb->pc. */
                    qatomic_set(&jc->array[h].tb, tb);
                }
            }

#ifndef CONFIG_USER_ONLY
            /*
             * We don't take care of direct jumps when address mapping
             * changes in system emulation.  So it's not safe to make a
             * direct jump to a TB spanning two pages because the mapping
             * for the second page can change.
             */
            if (tb_page_addr1(tb) != -1) {
                last_tb = NULL;
            }
#endif
            /* See if we can patch the calling TB. */
            if (last_tb) {
                tb_add_jump(last_tb, tb_exit, tb);
            }

            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);

            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(sc, cpu);
        }
    }
    return ret;
}
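
/*
 * cpu_loop_exit() and friends siglongjmp() back to the sigsetjmp()
 * below; the nonzero-return path drops whatever locks were still held
 * at the point of the longjmp before re-entering cpu_exec_loop().
 */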
static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
{
    /* Prepare setjmp context for exception handling. */
    if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
        /* Non-buggy compilers preserve this; assert the correct value. */
        g_assert(cpu == current_cpu);

#ifndef CONFIG_SOFTMMU
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }

        assert_no_pages_locked();
    }

    return cpu_exec_loop(cpu, sc);
}

int cpu_exec(CPUState *cpu)
{
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();
    cpu_exec_enter(cpu);

    /*
     * Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0.  As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    ret = cpu_exec_setjmp(cpu, &sc);

    cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}
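
/*
 * The static tcg_target_initialized guard below means the target's
 * tcg_ops->initialize() hook runs only once per process, when the
 * first vCPU is realized; the rest of the setup is per-CPU.
 */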
void tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
    static bool tcg_target_initialized;
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!tcg_target_initialized) {
        cc->tcg_ops->initialize();
        tcg_target_initialized = true;
    }

    cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
    tlb_init(cpu);
#ifndef CONFIG_USER_ONLY
    tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
    /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
}

/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */

    tlb_destroy(cpu);
    g_free_rcu(cpu->tb_jmp_cache, rcu);
}