/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "exec/replay-core.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto-common.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/*
 * Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

int64_t max_delay;
int64_t max_advance;
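
/*
 * Fold the instructions executed since the last call into diff_clk, and
 * if the guest clock is now more than VM_CLOCK_ADVANCE ahead of the
 * host clock, sleep off the surplus.
 */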
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}
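
/*
 * Rate-limited warning when the guest runs late: print at most once per
 * MAX_DELAY_PRINT_RATE ns and at most MAX_NB_PRINTS times in total.
 */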
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}
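
/*
 * Capture the initial guest/host clock difference for this run and
 * track the extremes seen so far in max_delay and max_advance.
 */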
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /*
     * Print every 2s max if the guest is late.  We limit the number
     * of printed messages to MAX_NB_PRINTS (currently 100).
     */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */
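
/*
 * Compute the cflags used to look up or generate the next TB: start from
 * the per-vCPU baseline and disable chaining as needed for single-step
 * and the -d nochain / one-insn-per-TB debug modes.
 */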
uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step.  We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (qatomic_read(&one_insn_per_tb)) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}
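
/*
 * Lookup key for the physical hash table: a TB matches only if its pc,
 * cs_base, flags, cflags and the physical page(s) it covers all agree.
 */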
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t page_addr0;
    uint32_t flags;
    uint32_t cflags;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
        tb_page_addr0(tb) == desc->page_addr0 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
        if (tb_phys_page1 == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page1;
            target_ulong virt_page1;

            /*
             * We know that the first page matched, and an otherwise valid TB
             * encountered an incomplete instruction at the end of that page,
             * therefore we know that generating a new TB from the current PC
             * must also require reading from the next page -- even if the
             * second pages do not match, and therefore the resulting insn
             * is different for the new TB.  Therefore any exception raised
             * here by the faulting lookup is not premature.
             */
            virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
            phys_page1 = get_page_addr_code(desc->env, virt_page1);
            if (tb_phys_page1 == phys_page1) {
                return true;
            }
        }
    }
    return false;
}
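
/*
 * Slow-path lookup in the global physical-PC hash table.  Returns NULL
 * when the code page is not mapped or no matching TB exists; note that
 * get_page_addr_code() may raise a guest exception here.
 */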
static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;
    h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
                     flags, cs_base, cflags);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags, uint32_t cflags)
{
    TranslationBlock *tb;
    CPUJumpCache *jc;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

    hash = tb_jmp_cache_hash_func(pc);
    jc = cpu->tb_jmp_cache;

    if (cflags & CF_PCREL) {
        /* Use acquire to ensure current load of pc from jc. */
        tb = qatomic_load_acquire(&jc->array[hash].tb);

        if (likely(tb &&
                   jc->array[hash].pc == pc &&
                   tb->cs_base == cs_base &&
                   tb->flags == flags &&
                   tb_cflags(tb) == cflags)) {
            return tb;
        }
        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            return NULL;
        }
        jc->array[hash].pc = pc;
        /* Ensure pc is written first. */
        qatomic_store_release(&jc->array[hash].tb, tb);
    } else {
        /* Use rcu_read to ensure current load of pc from *tb. */
        tb = qatomic_rcu_read(&jc->array[hash].tb);

        if (likely(tb &&
                   tb->pc == pc &&
                   tb->cs_base == cs_base &&
                   tb->flags == flags &&
                   tb_cflags(tb) == cflags)) {
            return tb;
        }
        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            return NULL;
        }
        /* Use the pc value already stored in tb->pc. */
        qatomic_set(&jc->array[hash].tb, tb);
    }

    return tb;
}
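
/*
 * Emit the -d exec trace line for a TB about to execute, plus a full
 * register dump when -d cpu is also enabled.
 */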
static void log_cpu_exec(target_ulong pc, CPUState *cpu,
                         const TranslationBlock *tb)
{
    if (qemu_log_in_addr_range(pc)) {
        qemu_log_mask(CPU_LOG_EXEC,
                      "Trace %d: %p [%08" PRIx64
                      "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                      tb->flags, tb->cflags, lookup_symbol(pc));

        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                int flags = 0;

                if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
                    flags |= CPU_DUMP_FPU;
                }
#if defined(TARGET_I386)
                flags |= CPU_DUMP_CCOP;
#endif
                if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
                    flags |= CPU_DUMP_VPU;
                }
                cpu_dump_state(cpu, logfile, flags);
                qemu_log_unlock(logfile);
            }
        }
    }
}
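
/*
 * Check whether a breakpoint fires at pc.  Returns true, with
 * exception_index set to EXCP_DEBUG, when execution must stop;
 * otherwise may tighten *cflags so that we single-step through a
 * page that contains a breakpoint.
 */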
static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
                                       uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                CPUClass *cc = CPU_GET_CLASS(cpu);
                assert(cc->tcg_ops->debug_check_breakpoint);
                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page.  Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (match_page) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
    }
    return false;
}

static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
                                         uint32_t *cflags)
{
    return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
        check_for_breakpoints_slow(cpu, pc, cflags);
}

/**
 * helper_lookup_tb_ptr: quick check for next tb
 * @env: current cpu state
 *
 * Look for an existing TB matching the current cpu state.
 * If found, return the code pointer.  If not found, return
 * the tcg epilogue so that we return into cpu_tb_exec.
 */
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cflags = curr_cflags(cpu);
    if (check_for_breakpoints(cpu, pc, &cflags)) {
        cpu_loop_exit(cpu);
    }

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return tcg_code_gen_epilogue;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(pc, cpu, tb);
    }

    return tb->tc.ptr;
}

/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call.  Since such function did not exist at compile time,
 * the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU, so this does not
 * affect the impact of CFI in environments with high security requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(log_pc(cpu, itb), cpu, itb);
    }

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    qemu_plugin_disable_mem_helpers(cpu);
    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /*
         * We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->tcg_ops->synchronize_from_tb) {
            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
        if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
            target_ulong pc = log_pc(cpu, last_tb);
            if (qemu_log_in_addr_range(pc)) {
                qemu_log("Stopped execution of TB chain before %p ["
                         TARGET_FMT_lx "] %s\n",
                         last_tb->tc.ptr, pc, lookup_symbol(pc));
            }
        }
    }

    /*
     * If gdb single-step, and we haven't raised another exception,
     * raise a debug exception.  Single-step with another exception
     * is handled in cpu_handle_exception.
     */
    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

    return last_tb;
}

static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_enter) {
        cc->tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_exit) {
        cc->tcg_ops->cpu_exec_exit(cpu);
    }
}
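
/*
 * Execute one guest instruction under the exclusive lock, as a fallback
 * for atomic operations that cannot be emulated while other vCPUs run in
 * parallel.  The TB is looked up or generated without CF_PARALLEL.
 */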
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
#ifdef CONFIG_USER_ONLY
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    /*
     * Get the rx view of the structure, from which we find the
     * executable code address, and tb_target_set_jmp_target can
     * produce a pc-relative displacement to jmp_target_addr[n].
     */
    const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
    uintptr_t offset = tb->jmp_insn_offset[n];
    uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
    uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;

    tb->jmp_target_addr[n] = addr;
    tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
}
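
/*
 * Chain two TBs: claim jump slot n of 'tb' and patch its generated code
 * to branch directly to 'tb_next', recording the link so that it can be
 * undone if 'tb_next' is invalidated.
 */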
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
                  tb->tc.ptr, n, tb_next->tc.ptr);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}
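
/*
 * Returns true if the vCPU should remain halted (nothing to do);
 * otherwise clears the halted state so that execution can resume.
 */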
static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (cpu->halted) {
#if defined(TARGET_I386)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif /* TARGET_I386 */
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }
#endif /* !CONFIG_USER_ONLY */

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->tcg_ops->debug_excp_handler) {
        cc->tcg_ops->debug_excp_handler(cpu);
    }
}
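
/*
 * Process any pending exception.  Returns true, with *ret set, when the
 * execution loop must return to its caller; false when execution can
 * continue.
 */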
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                                  | CF_NOIRQ | 1;
        }
#endif
        return false;
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /*
         * If user mode only, we simulate a fake exception which will
         * be handled outside the cpu execution loop.
         */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->tcg_ops->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

#ifndef CONFIG_USER_ONLY
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later.  It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */
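
/*
 * Process pending interrupt and exit requests.  Returns true when the
 * execution loop must stop and return to cpu_exec; clears *last_tb
 * whenever chaining from the previous TB would no longer be valid.
 */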
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here. Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /*
     * Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif /* !TARGET_I386 */
        /*
         * The target hook has 3 exit conditions:
         * false when the interrupt isn't processed,
         * true when it is, and we should restart on a new TB,
         * or it exits via longjmp through cpu_loop_exit.
         */
        else {
            CPUClass *cc = CPU_GET_CLASS(cpu);

            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    qemu_mutex_unlock_iothread();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /*
             * The target hook may have updated 'cpu->interrupt_request';
             * reload the 'interrupt_request' value.
             */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /*
             * Ensure that no TB jump will be modified as
             * the program flow was changed.
             */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}
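
/*
 * Execute one TB and decide whether chaining may continue.  *last_tb is
 * preserved for linking unless the exit was requested; in icount mode
 * the instruction budget is refilled here when the decrementer expires.
 */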
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    target_ulong pc,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /*
         * Something asked us to stop executing chained TBs; just
         * continue round the main loop.  Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired. */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution. */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next tb has more instructions than we have left to
     * execute we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (insns_left > 0 && insns_left < tb->icount) {
        assert(insns_left <= CF_COUNT_MASK);
        assert(cpu->icount_extra == 0);
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}

/* main execution loop */
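
/*
 * The loop body proper: translate (or look up) and execute TBs until
 * cpu_handle_exception or cpu_handle_interrupt says to stop.  Kept out
 * of line from cpu_exec_setjmp so that a longjmp back to the setjmp
 * point cannot clobber this function's locals.
 */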
static int __attribute__((noinline))
cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
{
    int ret;

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb;
            vaddr pc;
            uint64_t cs_base;
            uint32_t flags, cflags;

            cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);

            /*
             * When requested, use an exact setting for cflags for the next
             * execution.  This is used for icount, precise smc, and stop-
             * after-access watchpoints.  Since this request should never
             * have CF_INVALID set, -1 is a convenient invalid value that
             * does not require tcg headers for cpu_common_reset.
             */
            cflags = cpu->cflags_next_tb;
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                cpu->cflags_next_tb = -1;
            }

            if (check_for_breakpoints(cpu, pc, &cflags)) {
                break;
            }

            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
            if (tb == NULL) {
                CPUJumpCache *jc;
                uint32_t h;

                mmap_lock();
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
                mmap_unlock();

                /*
                 * We add the TB in the virtual pc hash table
                 * for the fast lookup
                 */
                h = tb_jmp_cache_hash_func(pc);
                jc = cpu->tb_jmp_cache;
                if (cflags & CF_PCREL) {
                    jc->array[h].pc = pc;
                    /* Ensure pc is written first. */
                    qatomic_store_release(&jc->array[h].tb, tb);
                } else {
                    /* Use the pc value already stored in tb->pc. */
                    qatomic_set(&jc->array[h].tb, tb);
                }
            }

#ifndef CONFIG_USER_ONLY
            /*
             * We don't take care of direct jumps when address mapping
             * changes in system emulation.  So it's not safe to make a
             * direct jump to a TB spanning two pages because the mapping
             * for the second page can change.
             */
            if (tb_page_addr1(tb) != -1) {
                last_tb = NULL;
            }
#endif
            /* See if we can patch the calling TB. */
            if (last_tb) {
                tb_add_jump(last_tb, tb_exit, tb);
            }

            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);

            /*
             * Try to align the host and virtual clocks
             * if the guest is in advance.
             */
            align_clocks(sc, cpu);
        }
    }
    return ret;
}

static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
{
    /* Prepare setjmp context for exception handling. */
    if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
        /* Non-buggy compilers preserve this; assert the correct value. */
        g_assert(cpu == current_cpu);

#ifdef CONFIG_USER_ONLY
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }

        assert_no_pages_locked();
    }

    return cpu_exec_loop(cpu, sc);
}

int cpu_exec(CPUState *cpu)
{
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();
    cpu_exec_enter(cpu);

    /*
     * Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    ret = cpu_exec_setjmp(cpu, &sc);

    cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}
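
/*
 * Per-vCPU TCG setup: initialize the target translator once globally,
 * then allocate this vCPU's jump cache and TLB, and in system mode the
 * IOMMU notifier list.
 */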
void tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
    static bool tcg_target_initialized;
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!tcg_target_initialized) {
        cc->tcg_ops->initialize();
        tcg_target_initialized = true;
    }

    cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
    tlb_init(cpu);
#ifndef CONFIG_USER_ONLY
    tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
    /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
}

/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */

    tlb_destroy(cpu);
    g_free_rcu(cpu->tb_jmp_cache, rcu);
}