/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif

/* -icount align implementation. */

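/* When -icount align is enabled, the guest's virtual clock is kept in
 * step with the host's realtime clock. diff_clk accumulates how far the
 * guest clock is ahead of (positive) or behind (negative) the host
 * clock, last_cpu_icount remembers the instruction counter at the
 * previous alignment point, and realtime_clock caches the host clock
 * sampled when the delay parameters were initialized. When the guest
 * runs ahead we sleep; when it falls behind we can only warn (see
 * print_delay below).
 */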
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5      /* hysteresis before lowering the warning threshold */
#define MAX_DELAY_PRINT_RATE 2000000000LL /* at most one warning every 2s */
#define MAX_NB_PRINTS 100         /* cap on the total number of warnings */

static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
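    /* Remember the worst delay and the largest advance observed so far;
     * these globals feed QEMU's clock-drift statistics.
     */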
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s at most if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
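/* The value returned by tcg_qemu_tb_exec is the address of the last
 * executed TB, with the low TB_EXIT_MASK bits overloaded to say why we
 * left generated code: which jump slot was taken, an exit request, or
 * an expired instruction count.
 */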
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

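    /* With icount enabled, I/O is only allowed on the last instruction
     * of a TB, so forbid it while generated code is running and allow
     * it again once we are back in the main loop.
     */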
    cpu->can_do_io = !use_icount;
    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt). We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE);
    tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

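/* Look the TB up in the global hash table, which is keyed on the
 * physical address of the code so that translations survive changes to
 * the virtual mapping. pc, cs_base and the translation flags must all
 * match, as must the second physical page for TBs that span a page
 * boundary.
 */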
static TranslationBlock *tb_find_physical(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint64_t flags)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for (;;) {
        tb = *ptb1;
        if (!tb) {
            return NULL;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                             TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    break;
                }
            } else {
                break;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }

    /* Move the TB to the head of the list */
    *ptb1 = tb->phys_hash_next;
    tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
    tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    return tb;
}

static TranslationBlock *tb_find_slow(CPUState *cpu,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb;

    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        goto found;
    }

#ifdef CONFIG_USER_ONLY
    /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
     * taken outside tb_lock. Since we're momentarily dropping
     * tb_lock, there's a chance that our desired tb has been
     * translated.
     */
    tb_unlock();
    mmap_lock();
    tb_lock();
    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (tb) {
        mmap_unlock();
        goto found;
    }
#endif

    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

#ifdef CONFIG_USER_ONLY
    mmap_unlock();
#endif

found:
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

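/* Fast TB lookup: first probe the per-CPU tb_jmp_cache, a small
 * direct-mapped cache indexed by virtual PC, and fall back to the
 * physical hash table (and, if necessary, a fresh translation) in
 * tb_find_slow on a miss.
 */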
static inline TranslationBlock *tb_find_fast(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(cpu, pc, cs_base, flags);
    }
    return tb;
}

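/* If the pending debug exception was not raised by a watchpoint, clear
 * any leftover watchpoint-hit flags before handing the exception to the
 * per-target handler.
 */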
static void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUArchState *env = &x86_cpu->env;
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        }
#endif
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;
    atomic_mb_set(&tcg_current_cpu, cpu);
    rcu_read_lock();

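    /* The store to tcg_current_cpu above is ordered before the read of
     * exit_request below, so an exit request raised by another thread
     * around this point cannot be missed: either we observe the request
     * here, or the requester observes tcg_current_cpu and kicks us.
     */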
    if (unlikely(atomic_mb_read(&exit_request))) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

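    /* The outer loop runs one sigsetjmp "episode" per iteration:
     * cpu_loop_exit() longjmps back here whenever an exception or an
     * exit request interrupts the inner TB-execution loop below.
     */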
    /* prepare setjmp context for exception handling */
    for (;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(cpu);
                    }
                    cpu->exception_index = -1;
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    cpu->exception_index = -1;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for (;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
                    /* The target hook has three exit conditions:
                       false when the interrupt isn't processed,
                       true when it is, and we should restart on a new TB,
                       and it may also leave via longjmp through
                       cpu_loop_exit. */
                    if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                        next_tb = 0;
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                tb_lock();
                tb = tb_find_fast(cpu);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
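                /* The low bits of next_tb identify which of the
                 * previous TB's two jump slots we left through, so
                 * tb_add_jump can patch that slot to branch straight
                 * to the new TB.
                 */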
                if (next_tb != 0 && tb->page_addr[1] == -1
                    && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                tb_unlock();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    cpu->current_tb = tb;
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    cpu->current_tb = NULL;
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (eg exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop. But we need to
                         * ensure the tcg_exit_req read in generated code
                         * comes before the next read of cpu->exit_request
                         * or cpu->interrupt_request.
                         */
                        smp_rmb();
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired. */
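                        /* u16.low counts instructions down to zero
                         * inside a TB, u32 goes negative when an
                         * interrupt is raised, and icount_extra holds
                         * the remainder of the current instruction
                         * budget: refill the decrementer if budget is
                         * left, otherwise run the leftover instructions
                         * in a throwaway TB and exit to the main loop
                         * so pending timers can be serviced.
                         */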
                        int insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            cpu->icount_extra += insns_left;
                            insns_left = MIN(0xffff, cpu->icount_extra);
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                                cpu_exec_nocache(cpu, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            cc = CPU_GET_CLASS(cpu);
            cpu->can_do_io = 1;
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
            env = &x86_cpu->env;
#endif
            tb_lock_reset();
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;

    /* Does not need atomic_mb_set because a spurious wakeup is okay. */
    atomic_set(&tcg_current_cpu, NULL);
    return ret;
}