/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "exec/replay-core.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto-common.h"
#include "tb-jmp-cache.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal-common.h"
#include "internal-target.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

int64_t max_delay;
int64_t max_advance;

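/*
 * align_clocks: with -icount align, keep the guest's icount-derived
 * virtual clock from running ahead of host real time by more than
 * VM_CLOCK_ADVANCE; when it does, sleep the vCPU thread for the
 * accumulated difference.
 */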
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

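/*
 * print_delay: warn when the guest has fallen behind real time,
 * rate-limited to one message per MAX_DELAY_PRINT_RATE ns and
 * MAX_NB_PRINTS messages in total.
 */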
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

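/*
 * curr_cflags: compute the cflags to use for the next TB lookup or
 * translation, layering single-step, one-insn-per-TB, and -d nochain
 * constraints on top of the per-CPU base tcg_cflags.
 */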
uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step.  We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (qatomic_read(&one_insn_per_tb)) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}

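/*
 * tb_desc packs the CPU-state keys (pc, cs_base, flags, cflags and the
 * physical address of the first code page) that tb_lookup_cmp uses to
 * match a candidate TranslationBlock in the global hash table.
 */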
struct tb_desc {
    vaddr pc;
    uint64_t cs_base;
    CPUArchState *env;
    tb_page_addr_t page_addr0;
    uint32_t flags;
    uint32_t cflags;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if ((tb_cflags(tb) & CF_PCREL || tb->pc == desc->pc) &&
        tb_page_addr0(tb) == desc->page_addr0 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
        if (tb_phys_page1 == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page1;
            vaddr virt_page1;

            /*
             * We know that the first page matched, and an otherwise valid TB
             * encountered an incomplete instruction at the end of that page,
             * therefore we know that generating a new TB from the current PC
             * must also require reading from the next page -- even if the
             * second pages do not match, and therefore the resulting insn
             * is different for the new TB.  Therefore any exception raised
             * here by the faulting lookup is not premature.
             */
            virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
            phys_page1 = get_page_addr_code(desc->env, virt_page1);
            if (tb_phys_page1 == phys_page1) {
                return true;
            }
        }
    }
    return false;
}

static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = cpu_env(cpu);
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;
    h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
                     flags, cs_base, cflags);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
                                          uint64_t cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    TranslationBlock *tb;
    CPUJumpCache *jc;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

    hash = tb_jmp_cache_hash_func(pc);
    jc = cpu->tb_jmp_cache;

    if (cflags & CF_PCREL) {
        /* Use acquire to ensure current load of pc from jc. */
        tb = qatomic_load_acquire(&jc->array[hash].tb);

        if (likely(tb &&
                   jc->array[hash].pc == pc &&
                   tb->cs_base == cs_base &&
                   tb->flags == flags &&
                   tb_cflags(tb) == cflags)) {
            return tb;
        }
        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            return NULL;
        }
        jc->array[hash].pc = pc;
        /* Ensure pc is written first. */
        qatomic_store_release(&jc->array[hash].tb, tb);
    } else {
        /* Use rcu_read to ensure current load of pc from *tb. */
        tb = qatomic_rcu_read(&jc->array[hash].tb);

        if (likely(tb &&
                   tb->pc == pc &&
                   tb->cs_base == cs_base &&
                   tb->flags == flags &&
                   tb_cflags(tb) == cflags)) {
            return tb;
        }
        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            return NULL;
        }
        /* Use the pc value already stored in tb->pc. */
        qatomic_set(&jc->array[hash].tb, tb);
    }

    return tb;
}

static void log_cpu_exec(vaddr pc, CPUState *cpu,
                         const TranslationBlock *tb)
{
    if (qemu_log_in_addr_range(pc)) {
        qemu_log_mask(CPU_LOG_EXEC,
                      "Trace %d: %p [%08" PRIx64
                      "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                      tb->flags, tb->cflags, lookup_symbol(pc));

        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                int flags = 0;

                if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
                    flags |= CPU_DUMP_FPU;
                }
#if defined(TARGET_I386)
                flags |= CPU_DUMP_CCOP;
#endif
                if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
                    flags |= CPU_DUMP_VPU;
                }
                cpu_dump_state(cpu, logfile, flags);
                qemu_log_unlock(logfile);
            }
        }
    }
}

static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
                                       uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                CPUClass *cc = CPU_GET_CLASS(cpu);
                assert(cc->tcg_ops->debug_check_breakpoint);
                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page.  Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (match_page) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
    }
    return false;
}

static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
                                         uint32_t *cflags)
{
    return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
        check_for_breakpoints_slow(cpu, pc, cflags);
}

/**
 * helper_lookup_tb_ptr: quick check for next tb
 * @env: current cpu state
 *
 * Look for an existing TB matching the current cpu state.
 * If found, return the code pointer.  If not found, return
 * the tcg epilogue so that we return into cpu_tb_exec.
 */
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cflags = curr_cflags(cpu);
    if (check_for_breakpoints(cpu, pc, &cflags)) {
        cpu_loop_exit(cpu);
    }

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return tcg_code_gen_epilogue;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(pc, cpu, tb);
    }

    return tb->tc.ptr;
}

/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call.  Since such function did not exist at compile time,
 * the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU, so this does not
 * affect the impact of CFI in environments with high security requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu_env(cpu);
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    if (qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC)) {
        log_cpu_exec(log_pc(cpu, itb), cpu, itb);
    }

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->neg.can_do_io = true;
    qemu_plugin_disable_mem_helpers(cpu);
    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->tcg_ops->synchronize_from_tb) {
            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
        if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
            vaddr pc = log_pc(cpu, last_tb);
            if (qemu_log_in_addr_range(pc)) {
                qemu_log("Stopped execution of TB chain before %p ["
                         "%016" VADDR_PRIx "] %s\n",
                         last_tb->tc.ptr, pc, lookup_symbol(pc));
            }
        }
    }

    /*
     * If gdb single-step, and we haven't raised another exception,
     * raise a debug exception.  Single-step with another exception
     * is handled in cpu_handle_exception.
     */
    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

    return last_tb;
}


static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_enter) {
        cc->tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_exit) {
        cc->tcg_ops->cpu_exec_exit(cpu);
    }
}

static void cpu_exec_longjmp_cleanup(CPUState *cpu)
{
    /* Non-buggy compilers preserve this; assert the correct value. */
    g_assert(cpu == current_cpu);

#ifdef CONFIG_USER_ONLY
    clear_helper_retaddr();
    if (have_mmap_lock()) {
        mmap_unlock();
    }
#else
    /*
     * For softmmu, a tlb_fill fault during translation will land here,
     * and we need to release any page locks held.  In system mode we
     * have one tcg_ctx per thread, so we know it was this cpu doing
     * the translation.
     *
     * Alternative 1: Install a cleanup to be called via an exception
     * handling safe longjmp.  It seems plausible that all our hosts
     * support such a thing.  We'd have to properly register unwind info
     * for the JIT for EH, rather than just for GDB.
     *
     * Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
     * capture the cpu_loop_exit longjmp, perform the cleanup, and
     * jump again to arrive here.
     */
    if (tcg_ctx->gen_tb) {
        tb_unlock_pages(tcg_ctx->gen_tb);
        tcg_ctx->gen_tb = NULL;
    }
#endif
    if (qemu_mutex_iothread_locked()) {
        qemu_mutex_unlock_iothread();
    }
    assert_no_pages_locked();
}

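/*
 * cpu_exec_step_atomic: execute exactly one guest instruction inside
 * start_exclusive/end_exclusive.  This is the serial fallback used
 * after beginning execution of an insn containing an atomic operation
 * that cannot be handled in parallel with the other vCPUs.
 */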
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu_env(cpu);
    TranslationBlock *tb;
    vaddr pc;
    uint64_t cs_base;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
        cpu_exec_longjmp_cleanup(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}

void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    /*
     * Get the rx view of the structure, from which we find the
     * executable code address, and tb_target_set_jmp_target can
     * produce a pc-relative displacement to jmp_target_addr[n].
     */
    const TranslationBlock *c_tb = tcg_splitwx_to_rx(tb);
    uintptr_t offset = tb->jmp_insn_offset[n];
    uintptr_t jmp_rx = (uintptr_t)tb->tc.ptr + offset;
    uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;

    tb->jmp_target_addr[n] = addr;
    tb_target_set_jmp_target(c_tb, n, jmp_rx, jmp_rw);
}

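/*
 * tb_add_jump: chain jump slot n of 'tb' directly to 'tb_next',
 * claiming the slot atomically and skipping the link if the
 * destination TB has been invalidated in the meantime.
 */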
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p index %d -> %p\n",
                  tb->tc.ptr, n, tb_next->tc.ptr);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

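/*
 * cpu_handle_halt: return true if the CPU is halted and has no work
 * pending, in which case cpu_exec() returns EXCP_HALTED immediately.
 */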
static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (cpu->halted) {
#if defined(TARGET_I386)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif /* TARGET_I386 */
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }
#endif /* !CONFIG_USER_ONLY */

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->tcg_ops->debug_excp_handler) {
        cc->tcg_ops->debug_excp_handler(cpu);
    }
}

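/*
 * cpu_handle_exception: process a pending cpu->exception_index.
 * Returns true, with *ret set, when cpu_exec() should return to its
 * caller; returns false when execution of TBs can continue.
 */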
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                | CF_NOIRQ | 1;
        }
#endif
        return false;
    }
    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->tcg_ops->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

#ifndef CONFIG_USER_ONLY
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later.  It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */

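/*
 * cpu_handle_interrupt: service pending interrupt and exit requests.
 * Returns true when the inner execution loop must be left (an
 * exception index has been set or an exit was requested); it may also
 * clear *last_tb to prevent chaining into the next TB.
 */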
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here.  Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif /* !TARGET_I386 */
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           or it may exit via longjmp through cpu_loop_exit.  */
        else {
            CPUClass *cc = CPU_GET_CLASS(cpu);

            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    qemu_mutex_unlock_iothread();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

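/*
 * cpu_loop_exec_tb: execute a single TB.  On an icount-driven exit,
 * push the elapsed instructions into the global icount and refill the
 * decrementer, constraining the next TB to the remaining budget.
 */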
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    vaddr pc, TranslationBlock **last_tb,
                                    int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu->neg.icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop.  Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution. */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->neg.icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next tb has more instructions than we have left to
     * execute we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (insns_left > 0 && insns_left < tb->icount) {
        assert(insns_left <= CF_COUNT_MASK);
        assert(cpu->icount_extra == 0);
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}

/* main execution loop */

static int __attribute__((noinline))
cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
{
    int ret;

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb;
            vaddr pc;
            uint64_t cs_base;
            uint32_t flags, cflags;

            cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);

            /*
             * When requested, use an exact setting for cflags for the next
             * execution.  This is used for icount, precise smc, and stop-
             * after-access watchpoints.  Since this request should never
             * have CF_INVALID set, -1 is a convenient invalid value that
             * does not require tcg headers for cpu_common_reset.
             */
            cflags = cpu->cflags_next_tb;
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                cpu->cflags_next_tb = -1;
            }

            if (check_for_breakpoints(cpu, pc, &cflags)) {
                break;
            }

            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
            if (tb == NULL) {
                CPUJumpCache *jc;
                uint32_t h;

                mmap_lock();
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
                mmap_unlock();

                /*
                 * We add the TB in the virtual pc hash table
                 * for the fast lookup
                 */
                h = tb_jmp_cache_hash_func(pc);
                jc = cpu->tb_jmp_cache;
                if (cflags & CF_PCREL) {
                    jc->array[h].pc = pc;
                    /* Ensure pc is written first. */
                    qatomic_store_release(&jc->array[h].tb, tb);
                } else {
                    /* Use the pc value already stored in tb->pc. */
                    qatomic_set(&jc->array[h].tb, tb);
                }
            }

#ifndef CONFIG_USER_ONLY
            /*
             * We don't take care of direct jumps when address mapping
             * changes in system emulation.  So it's not safe to make a
             * direct jump to a TB spanning two pages because the mapping
             * for the second page can change.
             */
            if (tb_page_addr1(tb) != -1) {
                last_tb = NULL;
            }
#endif
            /* See if we can patch the calling TB. */
            if (last_tb) {
                tb_add_jump(last_tb, tb_exit, tb);
            }

            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);

            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(sc, cpu);
        }
    }
    return ret;
}

static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
{
    /* Prepare setjmp context for exception handling. */
    if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
        cpu_exec_longjmp_cleanup(cpu);
    }

    return cpu_exec_loop(cpu, sc);
}

int cpu_exec(CPUState *cpu)
{
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();
    cpu_exec_enter(cpu);

    /*
     * Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0.  As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    ret = cpu_exec_setjmp(cpu, &sc);

    cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}

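/*
 * tcg_exec_realizefn: per-vCPU TCG setup.  Runs the target's one-time
 * tcg_ops->initialize() on first use, then allocates this CPU's jump
 * cache and TLB.
 */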
bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
{
    static bool tcg_target_initialized;
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (!tcg_target_initialized) {
        cc->tcg_ops->initialize();
        tcg_target_initialized = true;
    }

    cpu->tb_jmp_cache = g_new0(CPUJumpCache, 1);
    tlb_init(cpu);
#ifndef CONFIG_USER_ONLY
    tcg_iommu_init_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */
    /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */

    return true;
}

/* undo the initializations in reverse order */
void tcg_exec_unrealizefn(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    tcg_iommu_free_notifier_list(cpu);
#endif /* !CONFIG_USER_ONLY */

    tlb_destroy(cpu);
    g_free_rcu(cpu->tb_jmp_cache, rcu);
}