/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "exec/gdbstub.h"
#include "sysemu/dma.h"
#include "sysemu/kvm.h"
#include "qmp-commands.h"
#include "exec/exec-all.h"

#include "qemu/thread.h"
#include "sysemu/cpus.h"
#include "sysemu/qtest.h"
#include "qemu/main-loop.h"
#include "qemu/bitmap.h"
#include "qemu/seqlock.h"
#include "qapi-event.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"

#ifndef _WIN32
#include "qemu/compatfd.h"
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;
int64_t max_delay;
int64_t max_advance;

/* vcpu throttling controls */
static QEMUTimer *throttle_timer;
static unsigned int throttle_percentage;

#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

static bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || cpu->queued_work_first) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu) ||
        kvm_halt_in_kernel()) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
/* guest cycle counter */

/* Protected by TimersState seqlock */

static bool icount_sleep = true;
static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1 MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10

static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;
static QEMUTimer *icount_warp_timer;

typedef struct TimersState {
    /* Protected by BQL. */
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;

    /* cpu_clock_offset can be read out of BQL, so protect it with
     * this lock.
     */
    QemuSeqLock vm_clock_seqlock;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;

    /* Compensate for varying guest execution speed. */
    int64_t qemu_icount_bias;
    /* Only written by TCG thread */
    int64_t qemu_icount;
} TimersState;

static TimersState timers_state;

int64_t cpu_get_icount_raw(void)
{
    int64_t icount;
    CPUState *cpu = current_cpu;

    icount = timers_state.qemu_icount;
    if (cpu) {
        if (!cpu->can_do_io) {
            fprintf(stderr, "Bad icount read\n");
            exit(1);
        }
        icount -= (cpu->icount_decr.u16.low + cpu->icount_extra);
    }
    return icount;
}

/* Return the virtual CPU time, based on the instruction counter. */
static int64_t cpu_get_icount_locked(void)
{
    int64_t icount = cpu_get_icount_raw();
    return timers_state.qemu_icount_bias + cpu_icount_to_ns(icount);
}

int64_t cpu_get_icount(void)
{
    int64_t icount;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        icount = cpu_get_icount_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return icount;
}
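
/*
 * Lockless reader pattern for the seqlock above: a writer increments the
 * sequence counter around its update, so a reader retries until it observes
 * the same even sequence value before and after reading.  Readers never
 * block, which is why cpu_get_icount() can be called without holding the
 * BQL.  The general shape is:
 *
 *     do {
 *         start = seqlock_read_begin(&lock);
 *         ... read the protected fields ...
 *     } while (seqlock_read_retry(&lock, start));
 */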

int64_t cpu_icount_to_ns(int64_t icount)
{
    return icount << icount_time_shift;
}
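
/*
 * With the shift-based conversion above, one emulated instruction accounts
 * for 2^icount_time_shift nanoseconds of virtual time, i.e. the guest runs
 * at roughly (10^9 >> icount_time_shift) instructions per second.  For
 * example, icount_time_shift == 3 means 8 ns per instruction, or 125 MIPS,
 * and MAX_ICOUNT_SHIFT == 10 gives the ~1 MIPS floor mentioned above.  The
 * adaptive mode below tunes the shift within [0, MAX_ICOUNT_SHIFT] to track
 * real time.
 */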

/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
{
    int64_t ticks;

    if (use_icount) {
        return cpu_get_icount();
    }

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
        /* Note: non-increasing ticks may happen if the host uses
           software suspend. */
        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        ticks = timers_state.cpu_ticks_prev;
    }

    timers_state.cpu_ticks_prev = ticks;
    return ticks;
}

static int64_t cpu_get_clock_locked(void)
{
    int64_t ticks;

    ticks = timers_state.cpu_clock_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += get_clock();
    }

    return ticks;
}

/* return the host CPU monotonic timer and handle stop/restart */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

/* enable cpu_get_ticks()
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_enable_ticks(void)
{
    /* Here, the thing really protected by the seqlock is cpu_clock_offset. */
    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
    seqlock_write_end(&timers_state.vm_clock_seqlock);
}

/* disable cpu_get_ticks(): the clock is stopped. You must not call
 * cpu_get_ticks() after that.
 * Caller must hold the BQL, which serves as the mutex for vm_clock_seqlock.
 */
void cpu_disable_ticks(void)
{
    /* Here, the thing really protected by the seqlock is cpu_clock_offset. */
    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
    seqlock_write_end(&timers_state.vm_clock_seqlock);
}

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop.  */
#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;

    /* Protected by TimersState mutex.  */
    static int64_t last_delta;

    /* If the VM is not running, then do nothing.  */
    if (!runstate_is_running()) {
        return;
    }

    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    cur_time = cpu_get_clock_locked();
    cur_icount = cpu_get_icount_locked();

    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead.  Slow time down.  */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind.  Speed time up.  */
        icount_time_shift++;
    }
    last_delta = delta;
    timers_state.qemu_icount_bias = cur_icount
                              - (timers_state.qemu_icount << icount_time_shift);
    seqlock_write_end(&timers_state.vm_clock_seqlock);
}
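
/*
 * Rough sketch of the feedback loop above: delta is virtual time minus real
 * time.  If the guest clock is running sufficiently far ahead (more than the
 * 100 ms ICOUNT_WOBBLE allowance, compared against the previous delta), the
 * shift is decremented so each instruction costs fewer nanoseconds and
 * virtual time advances more slowly; the symmetric case speeds it up.  The
 * bias is then recomputed so the adjusted clock stays continuous at the
 * moment the shift changes.
 */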

static void icount_adjust_rt(void *opaque)
{
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void *opaque)
{
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
    icount_adjust();
}

static int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
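
/*
 * qemu_icount_round() is a ceiling division by 2^icount_time_shift: it
 * converts a nanosecond deadline into the smallest instruction count whose
 * virtual-time cost (count << icount_time_shift) covers the deadline, so
 * the vCPU's budget always reaches the next QEMU_CLOCK_VIRTUAL timer.
 */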

static void icount_warp_rt(void)
{
    unsigned seq;
    int64_t warp_start;

    /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
     * changes from -1 to another value, so the race here is okay.
     */
    do {
        seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        warp_start = vm_clock_warp_start;
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));

    if (warp_start == -1) {
        return;
    }

    seqlock_write_begin(&timers_state.vm_clock_seqlock);
    if (runstate_is_running()) {
        int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT,
                                     cpu_get_clock_locked());
        int64_t warp_delta;

        warp_delta = clock - vm_clock_warp_start;
        if (use_icount == 2) {
            /*
             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
             * far ahead of real time.
             */
            int64_t cur_icount = cpu_get_icount_locked();
            int64_t delta = clock - cur_icount;
            warp_delta = MIN(warp_delta, delta);
        }
        timers_state.qemu_icount_bias += warp_delta;
    }
    vm_clock_warp_start = -1;
    seqlock_write_end(&timers_state.vm_clock_seqlock);

    if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void icount_timer_cb(void *opaque)
{
    /* No need for a checkpoint because the timer already synchronizes
     * with CHECKPOINT_CLOCK_VIRTUAL_RT.
     */
    icount_warp_rt();
}

void qtest_clock_warp(int64_t dest)
{
    int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    AioContext *aio_context;
    assert(qtest_enabled());
    aio_context = qemu_get_aio_context();
    while (clock < dest) {
        int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
        int64_t warp = qemu_soonest_timeout(dest - clock, deadline);

        seqlock_write_begin(&timers_state.vm_clock_seqlock);
        timers_state.qemu_icount_bias += warp;
        seqlock_write_end(&timers_state.vm_clock_seqlock);

        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
        timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
        clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    }
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}

void qemu_start_warp_timer(void)
{
    int64_t clock;
    int64_t deadline;

    if (!use_icount) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
        return;
    }

    if (!all_cpu_threads_idle()) {
        return;
    }

    if (qtest_enabled()) {
        /* When testing, qtest commands advance icount.  */
        return;
    }

    /* We want to use the earliest deadline from ALL vm_clocks */
    clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
    if (deadline < 0) {
        static bool notified;
        if (!icount_sleep && !notified) {
            error_report("WARNING: icount sleep disabled and no active timers");
            notified = true;
        }
        return;
    }

    if (deadline > 0) {
        /*
         * Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
         * sleep.  Otherwise, the CPU might be waiting for a future timer
         * interrupt to wake it up, but the interrupt never comes because
         * the vCPU isn't running any insns and thus doesn't advance the
         * QEMU_CLOCK_VIRTUAL.
         */
        if (!icount_sleep) {
            /*
             * We never let VCPUs sleep in no-sleep icount mode.
             * If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
             * to the next QEMU_CLOCK_VIRTUAL event and notify it.
             * It is useful when we want a deterministic execution time,
             * isolated from host latencies.
             */
            seqlock_write_begin(&timers_state.vm_clock_seqlock);
            timers_state.qemu_icount_bias += deadline;
            seqlock_write_end(&timers_state.vm_clock_seqlock);
            qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
        } else {
            /*
             * We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
             * "real" time (related to the time left until the next event) has
             * passed.  The QEMU_CLOCK_VIRTUAL_RT clock will do this.
             * This prevents the warps from being visible externally; for
             * example, you will not be sending network packets continuously
             * instead of every 100 ms.
             */
            seqlock_write_begin(&timers_state.vm_clock_seqlock);
            if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
                vm_clock_warp_start = clock;
            }
            seqlock_write_end(&timers_state.vm_clock_seqlock);
            timer_mod_anticipate(icount_warp_timer, clock + deadline);
        }
    } else if (deadline == 0) {
        qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    }
}

static void qemu_account_warp_timer(void)
{
    if (!use_icount || !icount_sleep) {
        return;
    }

    /* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
     * do not fire, so computing the deadline does not make sense.
     */
    if (!runstate_is_running()) {
        return;
    }

    /* warp clock deterministically in record/replay mode */
    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
        return;
    }

    timer_del(icount_warp_timer);
    icount_warp_rt();
}

static bool icount_state_needed(void *opaque)
{
    return use_icount;
}

/*
 * This is a subsection for icount migration.
 */
static const VMStateDescription icount_vmstate_timers = {
    .name = "timer/icount",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icount_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(qemu_icount_bias, TimersState),
        VMSTATE_INT64(qemu_icount, TimersState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &icount_vmstate_timers,
        NULL
    }
};
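
/*
 * Note on the migration layout above: per the usual VMState subsection
 * rule, the icount fields are only put on the wire when the .needed
 * callback (icount_state_needed) returns true.  A guest started without
 * -icount therefore produces a stream with no "timer/icount" subsection
 * at all, which keeps migration compatible with destinations that predate
 * the subsection.
 */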

static void cpu_throttle_thread(void *opaque)
{
    CPUState *cpu = opaque;
    double pct;
    double throttle_ratio;
    long sleeptime_ns;

    if (!cpu_throttle_get_percentage()) {
        return;
    }

    pct = (double)cpu_throttle_get_percentage() / 100;
    throttle_ratio = pct / (1 - pct);
    sleeptime_ns = (long)(throttle_ratio * CPU_THROTTLE_TIMESLICE_NS);

    qemu_mutex_unlock_iothread();
    atomic_set(&cpu->throttle_thread_scheduled, 0);
    g_usleep(sleeptime_ns / 1000); /* Convert ns to us for usleep call */
    qemu_mutex_lock_iothread();
}
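
/*
 * Throttle arithmetic, by example: at a 50% throttle, pct = 0.5 and
 * throttle_ratio = 1, so the vCPU sleeps CPU_THROTTLE_TIMESLICE_NS (10 ms)
 * for every 10 ms of run time.  At 75%, throttle_ratio = 3 and the vCPU
 * sleeps 30 ms per 10 ms slice.  The timer below re-arms itself every
 * CPU_THROTTLE_TIMESLICE_NS / (1 - pct), i.e. one full run-plus-sleep
 * period, so the duty cycle holds regardless of the percentage.
 */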

static void cpu_throttle_timer_tick(void *opaque)
{
    CPUState *cpu;
    double pct;

    /* Stop the timer if needed */
    if (!cpu_throttle_get_percentage()) {
        return;
    }
    CPU_FOREACH(cpu) {
        if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
            async_run_on_cpu(cpu, cpu_throttle_thread, cpu);
        }
    }

    pct = (double)cpu_throttle_get_percentage() / 100;
    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                              CPU_THROTTLE_TIMESLICE_NS / (1 - pct));
}

void cpu_throttle_set(int new_throttle_pct)
{
    /* Ensure throttle percentage is within valid range */
    new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
    new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);

    atomic_set(&throttle_percentage, new_throttle_pct);

    timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
                              CPU_THROTTLE_TIMESLICE_NS);
}

void cpu_throttle_stop(void)
{
    atomic_set(&throttle_percentage, 0);
}

bool cpu_throttle_active(void)
{
    return (cpu_throttle_get_percentage() != 0);
}

int cpu_throttle_get_percentage(void)
{
    return atomic_read(&throttle_percentage);
}

void cpu_ticks_init(void)
{
    seqlock_init(&timers_state.vm_clock_seqlock);
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                  cpu_throttle_timer_tick, NULL);
}

void configure_icount(QemuOpts *opts, Error **errp)
{
    const char *option;
    char *rem_str = NULL;

    option = qemu_opt_get(opts, "shift");
    if (!option) {
        if (qemu_opt_get(opts, "align") != NULL) {
            error_setg(errp, "Please specify shift option when using align");
        }
        return;
    }

    icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
    if (icount_sleep) {
        icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                         icount_timer_cb, NULL);
    }

    icount_align_option = qemu_opt_get_bool(opts, "align", false);

    if (icount_align_option && !icount_sleep) {
        error_setg(errp, "align=on and sleep=off are incompatible");
    }
    if (strcmp(option, "auto") != 0) {
        errno = 0;
        icount_time_shift = strtol(option, &rem_str, 0);
        if (errno != 0 || *rem_str != '\0' || !strlen(option)) {
            error_setg(errp, "icount: Invalid shift value");
        }
        use_icount = 1;
        return;
    } else if (icount_align_option) {
        error_setg(errp, "shift=auto and align=on are incompatible");
    } else if (!icount_sleep) {
        error_setg(errp, "shift=auto and sleep=off are incompatible");
    }

    use_icount = 2;

    /* 125 MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway.  */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers.  */
    icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                   icount_adjust_rt, NULL);
    timer_mod(icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   icount_adjust_vm, NULL);
    timer_mod(icount_vm_timer,
              qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND / 10);
}
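
/*
 * configure_icount() is driven by the -icount command line option; for
 * example "-icount shift=7" fixes the conversion factor (use_icount == 1),
 * while "-icount shift=auto" selects the adaptive mode (use_icount == 2)
 * with the two adjustment timers armed above.  "sleep=off" and "align=on"
 * map to icount_sleep and icount_align_option respectively, subject to the
 * compatibility checks above.
 */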

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

static int do_vm_stop(RunState state)
{
    int ret = 0;

    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qapi_event_send_stop(&error_abort);
    }

    bdrv_drain_all();
    ret = blk_flush_all();

    return ret;
}

static bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *cpu)
{
    gdb_set_stop_cpu(cpu);
    qemu_system_debug_request();
    cpu->stopped = true;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(cpu, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *cpu)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static void dummy_signal(int sig)
{
}

static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(cpu, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
    abort();
}
#endif /* _WIN32 */

static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static unsigned iothread_requesting_mutex;

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.free = false;

    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = &wi;
    } else {
        cpu->queued_work_last->next = &wi;
    }
    cpu->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
    while (!atomic_mb_read(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }
}
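
/*
 * run_on_cpu() is the synchronous variant: the work item lives on the
 * caller's stack (wi.free = false) and the caller blocks on qemu_work_cond
 * until the target vCPU has run func.  async_run_on_cpu() below instead
 * heap-allocates the item with wi->free = true and returns immediately;
 * the item is executed and freed later by flush_queued_work() on the vCPU
 * thread.
 */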

void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
    struct qemu_work_item *wi;

    if (qemu_cpu_is_self(cpu)) {
        func(data);
        return;
    }

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;

    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}

static void qemu_kvm_destroy_vcpu(CPUState *cpu)
{
    if (kvm_destroy_vcpu(cpu) < 0) {
        error_report("kvm_destroy_vcpu failed");
        exit(EXIT_FAILURE);
    }
}

static void qemu_tcg_destroy_vcpu(CPUState *cpu)
{
}

static void flush_queued_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        qemu_mutex_unlock(&cpu->work_mutex);
        wi->func(wi->data);
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        qemu_cond_broadcast(&qemu_pause_cond);
    }
    flush_queued_work(cpu);
    cpu->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(CPUState *cpu)
{
    while (all_cpu_threads_idle()) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

static void qemu_kvm_wait_io_event(CPUState *cpu)
{
    while (cpu_thread_is_idle(cpu)) {
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(cpu);
    qemu_wait_io_event_common(cpu);
}

static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    r = kvm_init_vcpu(cpu);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(cpu);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    do {
        if (cpu_can_run(cpu)) {
            r = kvm_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_kvm_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    qemu_kvm_destroy_vcpu(cpu);
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
    qemu_mutex_unlock_iothread();
    return NULL;
}

static void *qemu_dummy_cpu_thread_fn(void *arg)
{
#ifdef _WIN32
    fprintf(stderr, "qtest is not supported under Windows\n");
    exit(1);
#else
    CPUState *cpu = arg;
    sigset_t waitset;
    int r;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);
    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    /* signal CPU creation */
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);

    current_cpu = cpu;
    while (1) {
        current_cpu = NULL;
        qemu_mutex_unlock_iothread();
        do {
            int sig;
            r = sigwait(&waitset, &sig);
        } while (r == -1 && (errno == EAGAIN || errno == EINTR));
        if (r == -1) {
            perror("sigwait");
            exit(1);
        }
        qemu_mutex_lock_iothread();
        current_cpu = cpu;
        qemu_wait_io_event_common(cpu);
    }

    return NULL;
#endif
}

static void tcg_exec_all(void);

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;
    CPUState *remove_cpu = NULL;

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    CPU_FOREACH(cpu) {
        cpu->thread_id = qemu_get_thread_id();
        cpu->created = true;
        cpu->can_do_io = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            qemu_wait_io_event_common(cpu);
        }
    }

    /* Start with exit_request set so the first tcg_exec_all() pass falls
     * through and pending work is serviced before any guest code runs. */
    atomic_mb_set(&exit_request, 1);

    while (1) {
        tcg_exec_all();

        if (use_icount) {
            int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

            if (deadline == 0) {
                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
            }
        }
        qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
        CPU_FOREACH(cpu) {
            if (cpu->unplug && !cpu_can_run(cpu)) {
                remove_cpu = cpu;
                break;
            }
        }
        if (remove_cpu) {
            qemu_tcg_destroy_vcpu(remove_cpu);
            remove_cpu->created = false;
            qemu_cond_signal(&qemu_cpu_cond);
            remove_cpu = NULL;
        }
    }

    return NULL;
}

static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
    int err;

    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;
    err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    abort();
#endif
}

static void qemu_cpu_kick_no_halt(void)
{
    CPUState *cpu;
    /* Ensure whatever caused the exit has reached the CPU threads before
     * writing exit_request.
     */
    atomic_mb_set(&exit_request, 1);
    cpu = atomic_mb_read(&tcg_current_cpu);
    if (cpu) {
        cpu_exit(cpu);
    }
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (tcg_enabled()) {
        qemu_cpu_kick_no_halt();
    } else {
        qemu_cpu_kick_thread(cpu);
    }
}

void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    qemu_cpu_kick_thread(current_cpu);
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

static __thread bool iothread_locked = false;

bool qemu_mutex_iothread_locked(void)
{
    return iothread_locked;
}

void qemu_mutex_lock_iothread(void)
{
    atomic_inc(&iothread_requesting_mutex);
    /* In the simple case there is no need to bump the VCPU thread out of
     * TCG code execution.
     */
    if (!tcg_enabled() || qemu_in_vcpu_thread() ||
        !first_cpu || !first_cpu->created) {
        qemu_mutex_lock(&qemu_global_mutex);
        atomic_dec(&iothread_requesting_mutex);
    } else {
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_no_halt();
            qemu_mutex_lock(&qemu_global_mutex);
        }
        atomic_dec(&iothread_requesting_mutex);
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
    iothread_locked = true;
}
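
/*
 * The trylock/kick dance above matters because a TCG vCPU can hold the BQL
 * for a long stretch while executing guest code.  If the trylock fails,
 * iothread_requesting_mutex is already raised, so the kicked vCPU thread
 * leaves cpu_exec() and parks itself in qemu_tcg_wait_io_event() on
 * qemu_io_proceeded_cond, releasing the BQL and letting the I/O thread's
 * blocking qemu_mutex_lock() succeed.
 */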

void qemu_mutex_unlock_iothread(void)
{
    iothread_locked = false;
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return 0;
        }
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }

    if (qemu_in_vcpu_thread()) {
        cpu_stop_current();
        if (!kvm_enabled()) {
            CPU_FOREACH(cpu) {
                cpu->stop = false;
                cpu->stopped = true;
            }
            return;
        }
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

void cpu_remove(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
}

void cpu_remove_sync(CPUState *cpu)
{
    cpu_remove(cpu);
    while (cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

/* For temporary buffers for forming a name */
#define VCPU_THREAD_NAME_SIZE 16

static void qemu_tcg_init_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    static QemuCond *tcg_halt_cond;
    static QemuThread *tcg_cpu_thread;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);
        tcg_halt_cond = cpu->halt_cond;
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
                 cpu->cpu_index);
        qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
        while (!cpu->created) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = cpu->thread;
    } else {
        cpu->thread = tcg_cpu_thread;
        cpu->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_kvm_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

static void qemu_dummy_start_vcpu(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);
    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, qemu_dummy_cpu_thread_fn, cpu,
                       QEMU_THREAD_JOINABLE);
    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        AddressSpace *as = address_space_init_shareable(cpu->memory,
                                                        "cpu-memory");
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, as, 0);
    }

    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(cpu);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = false;
        current_cpu->stopped = true;
        cpu_exit(current_cpu);
        qemu_cond_broadcast(&qemu_pause_cond);
    }
}

int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state);
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        runstate_set(state);

        bdrv_drain_all();
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        return blk_flush_all();
    }
}

static int64_t tcg_get_icount_limit(void)
{
    int64_t deadline;

    if (replay_mode != REPLAY_MODE_PLAY) {
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);

        /* Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        return qemu_icount_round(deadline);
    } else {
        return replay_get_instructions();
    }
}

static int tcg_cpu_exec(CPUState *cpu)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                     + cpu->icount_extra);
        cpu->icount_decr.u16.low = 0;
        cpu->icount_extra = 0;
        count = tcg_get_icount_limit();
        timers_state.qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
    }
    ret = cpu_exec(cpu);
#ifdef CONFIG_PROFILER
    tcg_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        timers_state.qemu_icount -= (cpu->icount_decr.u16.low
                                     + cpu->icount_extra);
        cpu->icount_decr.u32 = 0;
        cpu->icount_extra = 0;
        replay_account_executed_instructions();
    }
    return ret;
}
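
/*
 * Instruction budget bookkeeping, by example: if tcg_get_icount_limit()
 * returns 100000, the low 16 bits of the budget (0xffff = 65535) go into
 * icount_decr.u16.low, which the translated code decrements as it runs,
 * and the remaining 34465 instructions wait in icount_extra.  After
 * cpu_exec() returns, any unexecuted instructions are subtracted back out
 * of qemu_icount, so the counter reflects only instructions that actually
 * ran.
 */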

static void tcg_exec_all(void)
{
    int r;

    /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
    qemu_account_warp_timer();

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = CPU_NEXT(next_cpu)) {
        CPUState *cpu = next_cpu;

        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                          (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(cpu)) {
            r = tcg_cpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
                break;
            }
        } else if (cpu->stop || cpu->stopped) {
            if (cpu->unplug) {
                next_cpu = CPU_NEXT(cpu);
            }
            break;
        }
    }

    /* Pairs with smp_wmb in qemu_cpu_kick.  */
    atomic_mb_set(&exit_request, 0);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still lack it */
#if defined(cpu_list)
    cpu_list(f, cpu_fprintf);
#endif
}

CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#elif defined(TARGET_TRICORE)
        TriCoreCPU *tricore_cpu = TRICORE_CPU(cpu);
        CPUTriCoreState *env = &tricore_cpu->env;
#endif

        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        info->value->arch = CPU_INFO_ARCH_X86;
        info->value->u.x86.pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->arch = CPU_INFO_ARCH_PPC;
        info->value->u.ppc.nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->arch = CPU_INFO_ARCH_SPARC;
        info->value->u.q_sparc.pc = env->pc;
        info->value->u.q_sparc.npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->arch = CPU_INFO_ARCH_MIPS;
        info->value->u.q_mips.PC = env->active_tc.PC;
#elif defined(TARGET_TRICORE)
        info->value->arch = CPU_INFO_ARCH_TRICORE;
        info->value->u.tricore.PC = env->PC;
#else
        info->value->arch = CPU_INFO_ARCH_OTHER;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}

void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];
    int64_t orig_addr = addr, orig_size = size;

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                   "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
                             " specified", orig_addr, orig_size);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(), errp);
}

void dump_drift_info(FILE *f, fprintf_function cpu_fprintf)
{
    if (!use_icount) {
        return;
    }

    cpu_fprintf(f, "Host - Guest clock %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount()) / SCALE_MS);
    if (icount_align_option) {
        cpu_fprintf(f, "Max guest delay %"PRIi64" ms\n",
                    -max_delay / SCALE_MS);
        cpu_fprintf(f, "Max guest advance %"PRIi64" ms\n",
                    max_advance / SCALE_MS);
    } else {
        cpu_fprintf(f, "Max guest delay NA\n");
        cpu_fprintf(f, "Max guest advance NA\n");
    }
}