/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysemu.h"
#include "net.h"
#include "monitor.h"
#include "console.h"

#include "hw/hw.h"

#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <errno.h>
#include <sys/time.h>
#include <signal.h>
#ifdef __FreeBSD__
#include <sys/param.h>
#endif

#ifdef __linux__
#include <sys/ioctl.h>
#include <linux/rtc.h>
/* For the benefit of older linux systems which don't supply it,
   we use a local copy of hpet.h. */
/* #include <linux/hpet.h> */
#include "hpet.h"
#endif

#ifdef _WIN32
#include <windows.h>
#include <mmsystem.h>
#endif

#include "cpu-defs.h"
#include "qemu-timer.h"
#include "exec-all.h"

/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed. */
static int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;


/***********************************************************/
/* real time host monotonic timer */


static int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}
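
/* Illustrative only (kept out of the build): a worked example of the
 * nanosecond conversion above.  The sample values and the example_* name
 * are placeholders, not part of QEMU. */
#if 0
static void example_realtime_conversion(void)
{
    struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
    int64_t ns = tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
    assert(ns == 2500000000LL);    /* 2.5 seconds expressed in nanoseconds */
}
#endif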

#ifdef WIN32

static int64_t clock_freq;

static void init_get_clock(void)
{
    LARGE_INTEGER freq;
    int ret;
    ret = QueryPerformanceFrequency(&freq);
    if (ret == 0) {
        fprintf(stderr, "Could not calibrate ticks\n");
        exit(1);
    }
    clock_freq = freq.QuadPart;
}

static int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}
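
/* Illustrative only (kept out of the build): how the performance counter
 * value is scaled above.  The frequency used here is an arbitrary example;
 * the real value comes from QueryPerformanceFrequency() in init_get_clock().
 * muldiv64(a, b, c) computes a * b / c without intermediate overflow. */
#if 0
static int64_t example_qpc_scaling(void)
{
    int64_t freq = 3579545;     /* example counter frequency in Hz */
    int64_t ticks = 7159090;    /* example raw counter value (2 s worth) */
    /* 7159090 * 1000000000 / 3579545 == 2000000000 ns */
    return muldiv64(ticks, get_ticks_per_sec(), freq);
}
#endif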

#else

static int use_rt_clock;

static void init_get_clock(void)
{
    use_rt_clock = 0;
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
    || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
    {
        struct timespec ts;
        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
            use_rt_clock = 1;
        }
    }
#endif
}

static int64_t get_clock(void)
{
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
    || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif

/* Return the virtual CPU time, based on the instruction counter. */
static int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;
    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env))
            fprintf(stderr, "Bad clock read\n");
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}
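
/* Illustrative only (kept out of the build): the virtual-time formula used
 * above, with arbitrary sample numbers.  With icount_time_shift == 3 each
 * executed instruction accounts for 2^3 = 8 ns of virtual time. */
#if 0
static int64_t example_icount_to_ns(void)
{
    int64_t executed = 1000000;          /* example: one million instructions */
    int shift = 3;                       /* example value of icount_time_shift */
    int64_t bias = 0;                    /* example value of qemu_icount_bias */
    return bias + (executed << shift);   /* == 8000000 ns, i.e. 8 ms */
}
#endif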

/***********************************************************/
/* guest cycle counter */

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

TimersState timers_state;

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
static int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

#ifndef CONFIG_IOTHREAD
static int64_t qemu_icount_delta(void)
{
    if (!use_icount) {
        return 5000 * (int64_t) 1000000;
    } else if (use_icount == 1) {
        /* When not using an adaptive execution frequency
           we tend to get badly out of sync with real time,
           so just delay for a reasonable amount of time. */
        return 0;
    } else {
        return cpu_get_icount() - cpu_get_clock();
    }
}
#endif

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
   cpu_get_ticks() after that. */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}
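
/* Illustrative only (kept out of the build): how the offset pairing above
 * freezes the guest clock across a stop/start cycle.  The example_* helper
 * is a placeholder, not QEMU API. */
#if 0
static void example_ticks_stop_start(void)
{
    /* While running: returned time = host clock + cpu_clock_offset.
     * cpu_disable_ticks() latches that sum into cpu_clock_offset, so the
     * value returned while stopped stays constant.  cpu_enable_ticks()
     * then subtracts the current host clock again, so time resumes from
     * the latched value with no visible jump to the guest. */
    cpu_disable_ticks();    /* guest clock now reads a frozen value */
    /* ... host time passes while the VM is stopped ... */
    cpu_enable_ticks();     /* guest clock continues from the frozen value */
}
#endif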

/***********************************************************/
/* timers */

#define QEMU_CLOCK_REALTIME 0
#define QEMU_CLOCK_VIRTUAL  1
#define QEMU_CLOCK_HOST     2

struct QEMUClock {
    int type;
    int enabled;
    /* XXX: add frequency */
};

struct QEMUTimer {
    QEMUClock *clock;
    int64_t expire_time;
    QEMUTimerCB *cb;
    void *opaque;
    struct QEMUTimer *next;
};

struct qemu_alarm_timer {
    char const *name;
    int (*start)(struct qemu_alarm_timer *t);
    void (*stop)(struct qemu_alarm_timer *t);
    void (*rearm)(struct qemu_alarm_timer *t);
    void *priv;

    char expired;
    char pending;
};

static struct qemu_alarm_timer *alarm_timer;

int qemu_alarm_pending(void)
{
    return alarm_timer->pending;
}

static inline int alarm_has_dynticks(struct qemu_alarm_timer *t)
{
    return !!t->rearm;
}

static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
{
    if (!alarm_has_dynticks(t))
        return;

    t->rearm(t);
}

/* TODO: MIN_TIMER_REARM_US should be optimized */
#define MIN_TIMER_REARM_US 250

#ifdef _WIN32

struct qemu_alarm_win32 {
    MMRESULT timerId;
    unsigned int period;
} alarm_win32_data = {0, 0};

static int win32_start_timer(struct qemu_alarm_timer *t);
static void win32_stop_timer(struct qemu_alarm_timer *t);
static void win32_rearm_timer(struct qemu_alarm_timer *t);

#else

static int unix_start_timer(struct qemu_alarm_timer *t);
static void unix_stop_timer(struct qemu_alarm_timer *t);

#ifdef __linux__

static int dynticks_start_timer(struct qemu_alarm_timer *t);
static void dynticks_stop_timer(struct qemu_alarm_timer *t);
static void dynticks_rearm_timer(struct qemu_alarm_timer *t);

static int hpet_start_timer(struct qemu_alarm_timer *t);
static void hpet_stop_timer(struct qemu_alarm_timer *t);

static int rtc_start_timer(struct qemu_alarm_timer *t);
static void rtc_stop_timer(struct qemu_alarm_timer *t);

#endif /* __linux__ */

#endif /* _WIN32 */

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing. */
    if (!vm_running)
        return;

    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead. Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind. Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}
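
/* Illustrative only (kept out of the build): the effect of changing
 * icount_time_shift in icount_adjust() above.  Sample numbers are arbitrary. */
#if 0
static void example_icount_adjust_effect(void)
{
    /* One million executed instructions at two different shift values: */
    int64_t insns = 1000000;
    int64_t ns_at_shift_4 = insns << 4;   /* 16000000 ns (~62.5 MIPS assumed) */
    int64_t ns_at_shift_3 = insns << 3;   /*  8000000 ns (~125 MIPS assumed)  */
    /* When virtual time runs ahead of real time, icount_adjust() decrements
       the shift, so the same instruction count produces less virtual time;
       when it lags, the shift is incremented, up to MAX_ICOUNT_SHIFT. */
    (void)ns_at_shift_4;
    (void)ns_at_shift_3;
}
#endif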

static void icount_adjust_rt(void * opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void * opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}

int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
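
/* Illustrative only (kept out of the build): qemu_icount_round() above is a
 * round-up division by 2^icount_time_shift, so a deadline never rounds down
 * to fewer instructions than it needs.  Assuming a shift of 3: */
#if 0
static void example_icount_round(void)
{
    assert(((20 + (1 << 3) - 1) >> 3) == 3);   /* 20 ns -> 3 instructions    */
    assert(((16 + (1 << 3) - 1) >> 3) == 2);   /* exact multiples stay exact */
}
#endif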

static struct qemu_alarm_timer alarm_timers[] = {
#ifndef _WIN32
#ifdef __linux__
    {"dynticks", dynticks_start_timer,
     dynticks_stop_timer, dynticks_rearm_timer, NULL},
    /* HPET - if available - is preferred */
    {"hpet", hpet_start_timer, hpet_stop_timer, NULL, NULL},
    /* ...otherwise try RTC */
    {"rtc", rtc_start_timer, rtc_stop_timer, NULL, NULL},
#endif
    {"unix", unix_start_timer, unix_stop_timer, NULL, NULL},
#else
    {"dynticks", win32_start_timer,
     win32_stop_timer, win32_rearm_timer, &alarm_win32_data},
    {"win32", win32_start_timer,
     win32_stop_timer, NULL, &alarm_win32_data},
#endif
    {NULL, }
};

static void show_available_alarms(void)
{
    int i;

    printf("Available alarm timers, in order of precedence:\n");
    for (i = 0; alarm_timers[i].name; i++)
        printf("%s\n", alarm_timers[i].name);
}

void configure_alarms(char const *opt)
{
    int i;
    int cur = 0;
    int count = ARRAY_SIZE(alarm_timers) - 1;
    char *arg;
    char *name;
    struct qemu_alarm_timer tmp;

    if (!strcmp(opt, "?")) {
        show_available_alarms();
        exit(0);
    }

    arg = qemu_strdup(opt);

    /* Reorder the array */
    name = strtok(arg, ",");
    while (name) {
        for (i = 0; i < count && alarm_timers[i].name; i++) {
            if (!strcmp(alarm_timers[i].name, name))
                break;
        }

        if (i == count) {
            fprintf(stderr, "Unknown clock %s\n", name);
            goto next;
        }

        if (i < cur)
            /* Ignore */
            goto next;

        /* Swap */
        tmp = alarm_timers[i];
        alarm_timers[i] = alarm_timers[cur];
        alarm_timers[cur] = tmp;

        cur++;
next:
        name = strtok(NULL, ",");
    }

    qemu_free(arg);

    if (cur) {
        /* Disable remaining timers */
        for (i = cur; i < count; i++)
            alarm_timers[i].name = NULL;
    } else {
        show_available_alarms();
        exit(1);
    }
}
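
/* Illustrative only (kept out of the build): configure_alarms() is driven by
 * the user-supplied timer list (the -clock command line option in this era of
 * QEMU).  The call below reorders alarm_timers[] so that "hpet" is tried
 * first, then "rtc", and disables every timer that was not named. */
#if 0
static void example_configure_alarms(void)
{
    configure_alarms("hpet,rtc");   /* "?" would list the available timers */
}
#endif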

#define QEMU_NUM_CLOCKS 3

QEMUClock *rt_clock;
QEMUClock *vm_clock;
QEMUClock *host_clock;

static QEMUTimer *active_timers[QEMU_NUM_CLOCKS];

static QEMUClock *qemu_new_clock(int type)
{
    QEMUClock *clock;
    clock = qemu_mallocz(sizeof(QEMUClock));
    clock->type = type;
    clock->enabled = 1;
    return clock;
}

void qemu_clock_enable(QEMUClock *clock, int enabled)
{
    clock->enabled = enabled;
}

QEMUTimer *qemu_new_timer(QEMUClock *clock, QEMUTimerCB *cb, void *opaque)
{
    QEMUTimer *ts;

    ts = qemu_mallocz(sizeof(QEMUTimer));
    ts->clock = clock;
    ts->cb = cb;
    ts->opaque = opaque;
    return ts;
}

void qemu_free_timer(QEMUTimer *ts)
{
    qemu_free(ts);
}

/* stop a timer, but do not dealloc it */
void qemu_del_timer(QEMUTimer *ts)
{
    QEMUTimer **pt, *t;

    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    pt = &active_timers[ts->clock->type];
    for(;;) {
        t = *pt;
        if (!t)
            break;
        if (t == ts) {
            *pt = t->next;
            break;
        }
        pt = &t->next;
    }
}

/* modify the current timer so that it will be fired when current_time
   >= expire_time. The corresponding callback will be called. */
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time)
{
    QEMUTimer **pt, *t;

    qemu_del_timer(ts);

    /* add the timer in the sorted list */
    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    pt = &active_timers[ts->clock->type];
    for(;;) {
        t = *pt;
        if (!t)
            break;
        if (t->expire_time > expire_time)
            break;
        pt = &t->next;
    }
    ts->expire_time = expire_time;
    ts->next = *pt;
    *pt = ts;

    /* Rearm if necessary */
    if (pt == &active_timers[ts->clock->type]) {
        if (!alarm_timer->pending) {
            qemu_rearm_alarm_timer(alarm_timer);
        }
        /* Interrupt execution to force deadline recalculation. */
        if (use_icount)
            qemu_notify_event();
    }
}
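
/* Illustrative only (kept out of the build): typical use of the timer API
 * defined above.  The example_* names are placeholders, not QEMU API. */
#if 0
static QEMUTimer *example_timer;

static void example_timer_cb(void *opaque)
{
    /* fires once the virtual clock reaches the programmed expire time;
       re-arm for another 10 ms of virtual time */
    qemu_mod_timer(example_timer,
                   qemu_get_clock(vm_clock) + get_ticks_per_sec() / 100);
}

static void example_timer_usage(void)
{
    example_timer = qemu_new_timer(vm_clock, example_timer_cb, NULL);
    qemu_mod_timer(example_timer,
                   qemu_get_clock(vm_clock) + get_ticks_per_sec() / 100);
    /* ... eventually: qemu_del_timer(example_timer);
                       qemu_free_timer(example_timer); */
}
#endif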

int qemu_timer_pending(QEMUTimer *ts)
{
    QEMUTimer *t;
    for(t = active_timers[ts->clock->type]; t != NULL; t = t->next) {
        if (t == ts)
            return 1;
    }
    return 0;
}

int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time)
{
    if (!timer_head)
        return 0;
    return (timer_head->expire_time <= current_time);
}

static void qemu_run_timers(QEMUClock *clock)
{
    QEMUTimer **ptimer_head, *ts;
    int64_t current_time;

    if (!clock->enabled)
        return;

    current_time = qemu_get_clock (clock);
    ptimer_head = &active_timers[clock->type];
    for(;;) {
        ts = *ptimer_head;
        if (!ts || ts->expire_time > current_time)
            break;
        /* remove timer from the list before calling the callback */
        *ptimer_head = ts->next;
        ts->next = NULL;

        /* run the callback (the timer list can be modified) */
        ts->cb(ts->opaque);
    }
}

int64_t qemu_get_clock(QEMUClock *clock)
{
    switch(clock->type) {
    case QEMU_CLOCK_REALTIME:
        return get_clock() / 1000000;
    default:
    case QEMU_CLOCK_VIRTUAL:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_clock();
        }
    case QEMU_CLOCK_HOST:
        return get_clock_realtime();
    }
}

int64_t qemu_get_clock_ns(QEMUClock *clock)
{
    switch(clock->type) {
    case QEMU_CLOCK_REALTIME:
        return get_clock();
    default:
    case QEMU_CLOCK_VIRTUAL:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_clock();
        }
    case QEMU_CLOCK_HOST:
        return get_clock_realtime();
    }
}
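
/* Illustrative only (kept out of the build): the two accessors above differ
 * in the unit used for the realtime clock.  qemu_get_clock(rt_clock) returns
 * milliseconds, qemu_get_clock_ns() always returns nanoseconds; the virtual
 * and host clocks are reported in nanoseconds by both. */
#if 0
static void example_clock_units(void)
{
    int64_t rt_ms = qemu_get_clock(rt_clock);       /* milliseconds */
    int64_t rt_ns = qemu_get_clock_ns(rt_clock);    /* nanoseconds  */
    int64_t vm_ns = qemu_get_clock(vm_clock);       /* nanoseconds  */
    (void)rt_ms; (void)rt_ns; (void)vm_ns;
}
#endif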

void init_clocks(void)
{
    init_get_clock();
    rt_clock = qemu_new_clock(QEMU_CLOCK_REALTIME);
    vm_clock = qemu_new_clock(QEMU_CLOCK_VIRTUAL);
    host_clock = qemu_new_clock(QEMU_CLOCK_HOST);

    rtc_clock = host_clock;
}

/* save a timer */
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    if (qemu_timer_pending(ts)) {
        expire_time = ts->expire_time;
    } else {
        expire_time = -1;
    }
    qemu_put_be64(f, expire_time);
}

void qemu_get_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = qemu_get_be64(f);
    if (expire_time != -1) {
        qemu_mod_timer(ts, expire_time);
    } else {
        qemu_del_timer(ts);
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

void configure_icount(const char *option)
{
    vmstate_register(0, &vmstate_timers, &timers_state);
    if (!option)
        return;

    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = qemu_new_timer(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10);
}
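
/* Illustrative only (kept out of the build): what the initial
 * icount_time_shift of 3 chosen above means.  Each emulated instruction is
 * credited with 2^3 = 8 ns of virtual time, i.e. an assumed guest speed of
 * 1e9 / 8 = 125 million instructions per second; "auto" then lets
 * icount_adjust() move the shift up or down at run time. */
#if 0
static void example_configure_icount(void)
{
    configure_icount("3");      /* fixed: 8 ns of virtual time per instruction */
    configure_icount("auto");   /* adaptive, starting from the same 125MIPS guess */
}
#endif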

void qemu_run_all_timers(void)
{
    alarm_timer->pending = 0;

    /* rearm timer, if not periodic */
    if (alarm_timer->expired) {
        alarm_timer->expired = 0;
        qemu_rearm_alarm_timer(alarm_timer);
    }

    /* vm time timers */
    if (vm_running) {
        qemu_run_timers(vm_clock);
    }

    qemu_run_timers(rt_clock);
    qemu_run_timers(host_clock);
}

#ifdef _WIN32
static void CALLBACK host_alarm_handler(UINT uTimerID, UINT uMsg,
                                        DWORD_PTR dwUser, DWORD_PTR dw1,
                                        DWORD_PTR dw2)
#else
static void host_alarm_handler(int host_signum)
#endif
{
    struct qemu_alarm_timer *t = alarm_timer;
    if (!t)
        return;

#if 0
#define DISP_FREQ 1000
    {
        static int64_t delta_min = INT64_MAX;
        static int64_t delta_max, delta_cum, last_clock, delta, ti;
        static int count;
        ti = qemu_get_clock(vm_clock);
        if (last_clock != 0) {
            delta = ti - last_clock;
            if (delta < delta_min)
                delta_min = delta;
            if (delta > delta_max)
                delta_max = delta;
            delta_cum += delta;
            if (++count == DISP_FREQ) {
                printf("timer: min=%" PRId64 " us max=%" PRId64 " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n",
                       muldiv64(delta_min, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_max, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_cum, 1000000 / DISP_FREQ, get_ticks_per_sec()),
                       (double)get_ticks_per_sec() / ((double)delta_cum / DISP_FREQ));
                count = 0;
                delta_min = INT64_MAX;
                delta_max = 0;
                delta_cum = 0;
            }
        }
        last_clock = ti;
    }
#endif
    if (alarm_has_dynticks(t) ||
        (!use_icount &&
            qemu_timer_expired(active_timers[QEMU_CLOCK_VIRTUAL],
                               qemu_get_clock(vm_clock))) ||
        qemu_timer_expired(active_timers[QEMU_CLOCK_REALTIME],
                           qemu_get_clock(rt_clock)) ||
        qemu_timer_expired(active_timers[QEMU_CLOCK_HOST],
                           qemu_get_clock(host_clock))) {

        t->expired = alarm_has_dynticks(t);
        t->pending = 1;
        qemu_notify_event();
    }
}

int64_t qemu_next_deadline(void)
{
    /* To avoid problems with overflow limit this to 2^32. */
    int64_t delta = INT32_MAX;

    if (active_timers[QEMU_CLOCK_VIRTUAL]) {
        delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time -
                qemu_get_clock(vm_clock);
    }
    if (active_timers[QEMU_CLOCK_HOST]) {
        int64_t hdelta = active_timers[QEMU_CLOCK_HOST]->expire_time -
                qemu_get_clock(host_clock);
        if (hdelta < delta)
            delta = hdelta;
    }

    if (delta < 0)
        delta = 0;

    return delta;
}

#ifndef _WIN32

#if defined(__linux__)

#define RTC_FREQ 1024

static uint64_t qemu_next_deadline_dyntick(void)
{
    int64_t delta;
    int64_t rtdelta;

    if (use_icount)
        delta = INT32_MAX;
    else
        delta = (qemu_next_deadline() + 999) / 1000;

    if (active_timers[QEMU_CLOCK_REALTIME]) {
        rtdelta = (active_timers[QEMU_CLOCK_REALTIME]->expire_time -
                   qemu_get_clock(rt_clock))*1000;
        if (rtdelta < delta)
            delta = rtdelta;
    }

    if (delta < MIN_TIMER_REARM_US)
        delta = MIN_TIMER_REARM_US;

    return delta;
}
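
/* Illustrative only (kept out of the build): the unit conversions performed
 * above.  The vm/host deadline is in nanoseconds and is converted to
 * microseconds rounding up, realtime deadlines are in milliseconds and are
 * scaled by 1000, and the result is never allowed below MIN_TIMER_REARM_US. */
#if 0
static void example_dyntick_deadline_units(void)
{
    assert((1500 + 999) / 1000 == 2);       /* 1500 ns -> 2 us, rounded up */
    assert(3 * 1000 == 3000);               /* 3 ms realtime -> 3000 us    */
    assert(MIN_TIMER_REARM_US == 250);      /* floor applied at the end    */
}
#endif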

static void enable_sigio_timer(int fd)
{
    struct sigaction act;

    /* timer signal */
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGIO, &act, NULL);
    fcntl_setfl(fd, O_ASYNC);
    fcntl(fd, F_SETOWN, getpid());
}

static int hpet_start_timer(struct qemu_alarm_timer *t)
{
    struct hpet_info info;
    int r, fd;

    fd = qemu_open("/dev/hpet", O_RDONLY);
    if (fd < 0)
        return -1;

    /* Set frequency */
    r = ioctl(fd, HPET_IRQFREQ, RTC_FREQ);
    if (r < 0) {
        fprintf(stderr, "Could not configure '/dev/hpet' to have a 1024Hz timer. This is not a fatal\n"
                "error, but for better emulation accuracy type:\n"
                "'echo 1024 > /proc/sys/dev/hpet/max-user-freq' as root.\n");
        goto fail;
    }

    /* Check capabilities */
    r = ioctl(fd, HPET_INFO, &info);
    if (r < 0)
        goto fail;

    /* Enable periodic mode */
    r = ioctl(fd, HPET_EPI, 0);
    if (info.hi_flags && (r < 0))
        goto fail;

    /* Enable interrupt */
    r = ioctl(fd, HPET_IE_ON, 0);
    if (r < 0)
        goto fail;

    enable_sigio_timer(fd);
    t->priv = (void *)(long)fd;

    return 0;
fail:
    close(fd);
    return -1;
}

static void hpet_stop_timer(struct qemu_alarm_timer *t)
{
    int fd = (long)t->priv;

    close(fd);
}

static int rtc_start_timer(struct qemu_alarm_timer *t)
{
    int rtc_fd;
    unsigned long current_rtc_freq = 0;

    TFR(rtc_fd = qemu_open("/dev/rtc", O_RDONLY));
    if (rtc_fd < 0)
        return -1;
    ioctl(rtc_fd, RTC_IRQP_READ, &current_rtc_freq);
    if (current_rtc_freq != RTC_FREQ &&
        ioctl(rtc_fd, RTC_IRQP_SET, RTC_FREQ) < 0) {
        fprintf(stderr, "Could not configure '/dev/rtc' to have a 1024 Hz timer. This is not a fatal\n"
                "error, but for better emulation accuracy either use a 2.6 host Linux kernel or\n"
                "type 'echo 1024 > /proc/sys/dev/rtc/max-user-freq' as root.\n");
        goto fail;
    }
    if (ioctl(rtc_fd, RTC_PIE_ON, 0) < 0) {
    fail:
        close(rtc_fd);
        return -1;
    }

    enable_sigio_timer(rtc_fd);

    t->priv = (void *)(long)rtc_fd;

    return 0;
}

static void rtc_stop_timer(struct qemu_alarm_timer *t)
{
    int rtc_fd = (long)t->priv;

    close(rtc_fd);
}

static int dynticks_start_timer(struct qemu_alarm_timer *t)
{
    struct sigevent ev;
    timer_t host_timer;
    struct sigaction act;

    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    /*
     * Initialize ev struct to 0 to avoid valgrind complaining
     * about uninitialized data in timer_create call
     */
    memset(&ev, 0, sizeof(ev));
    ev.sigev_value.sival_int = 0;
    ev.sigev_notify = SIGEV_SIGNAL;
    ev.sigev_signo = SIGALRM;

    if (timer_create(CLOCK_REALTIME, &ev, &host_timer)) {
        perror("timer_create");

        /* disable dynticks */
        fprintf(stderr, "Dynamic Ticks disabled\n");

        return -1;
    }

    t->priv = (void *)(long)host_timer;

    return 0;
}

static void dynticks_stop_timer(struct qemu_alarm_timer *t)
{
    timer_t host_timer = (timer_t)(long)t->priv;

    timer_delete(host_timer);
}

static void dynticks_rearm_timer(struct qemu_alarm_timer *t)
{
    timer_t host_timer = (timer_t)(long)t->priv;
    struct itimerspec timeout;
    int64_t nearest_delta_us = INT64_MAX;
    int64_t current_us;

    assert(alarm_has_dynticks(t));
    if (!active_timers[QEMU_CLOCK_REALTIME] &&
        !active_timers[QEMU_CLOCK_VIRTUAL] &&
        !active_timers[QEMU_CLOCK_HOST])
        return;

    nearest_delta_us = qemu_next_deadline_dyntick();

    /* check whether a timer is already running */
    if (timer_gettime(host_timer, &timeout)) {
        perror("gettime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
    current_us = timeout.it_value.tv_sec * 1000000 + timeout.it_value.tv_nsec/1000;
    if (current_us && current_us <= nearest_delta_us)
        return;

    timeout.it_interval.tv_sec = 0;
    timeout.it_interval.tv_nsec = 0; /* 0 for one-shot timer */
    timeout.it_value.tv_sec = nearest_delta_us / 1000000;
    timeout.it_value.tv_nsec = (nearest_delta_us % 1000000) * 1000;
    if (timer_settime(host_timer, 0 /* RELATIVE */, &timeout, NULL)) {
        perror("settime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
}
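
/* Illustrative only (kept out of the build): how a microsecond delta is split
 * into the struct itimerspec fields used above.  The sample value is arbitrary. */
#if 0
static void example_itimerspec_split(void)
{
    int64_t nearest_delta_us = 1500000;                   /* 1.5 s */
    struct itimerspec timeout;
    timeout.it_interval.tv_sec = 0;
    timeout.it_interval.tv_nsec = 0;                      /* one-shot timer */
    timeout.it_value.tv_sec = nearest_delta_us / 1000000;           /* 1         */
    timeout.it_value.tv_nsec = (nearest_delta_us % 1000000) * 1000; /* 500000000 */
    (void)timeout;
}
#endif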

#endif /* defined(__linux__) */

static int unix_start_timer(struct qemu_alarm_timer *t)
{
    struct sigaction act;
    struct itimerval itv;
    int err;

    /* timer signal */
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    itv.it_interval.tv_sec = 0;
    /* for i386 kernel 2.6 to get 1 ms */
    itv.it_interval.tv_usec = 999;
    itv.it_value.tv_sec = 0;
    itv.it_value.tv_usec = 10 * 1000;

    err = setitimer(ITIMER_REAL, &itv, NULL);
    if (err)
        return -1;

    return 0;
}

static void unix_stop_timer(struct qemu_alarm_timer *t)
{
    struct itimerval itv;

    memset(&itv, 0, sizeof(itv));
    setitimer(ITIMER_REAL, &itv, NULL);
}

#endif /* !defined(_WIN32) */


#ifdef _WIN32

static int win32_start_timer(struct qemu_alarm_timer *t)
{
    TIMECAPS tc;
    struct qemu_alarm_win32 *data = t->priv;
    UINT flags;

    memset(&tc, 0, sizeof(tc));
    timeGetDevCaps(&tc, sizeof(tc));

    data->period = tc.wPeriodMin;
    timeBeginPeriod(data->period);

    flags = TIME_CALLBACK_FUNCTION;
    if (alarm_has_dynticks(t))
        flags |= TIME_ONESHOT;
    else
        flags |= TIME_PERIODIC;

    data->timerId = timeSetEvent(1,         // interval (ms)
                        data->period,       // resolution
                        host_alarm_handler, // function
                        (DWORD)t,           // parameter
                        flags);

    if (!data->timerId) {
        fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
                GetLastError());
        timeEndPeriod(data->period);
        return -1;
    }

    return 0;
}

static void win32_stop_timer(struct qemu_alarm_timer *t)
{
    struct qemu_alarm_win32 *data = t->priv;

    timeKillEvent(data->timerId);
    timeEndPeriod(data->period);
}

static void win32_rearm_timer(struct qemu_alarm_timer *t)
{
    struct qemu_alarm_win32 *data = t->priv;

    assert(alarm_has_dynticks(t));
    if (!active_timers[QEMU_CLOCK_REALTIME] &&
        !active_timers[QEMU_CLOCK_VIRTUAL] &&
        !active_timers[QEMU_CLOCK_HOST])
        return;

    timeKillEvent(data->timerId);

    data->timerId = timeSetEvent(1,
                        data->period,
                        host_alarm_handler,
                        (DWORD)t,
                        TIME_ONESHOT | TIME_CALLBACK_FUNCTION);

    if (!data->timerId) {
        fprintf(stderr, "Failed to re-arm win32 alarm timer %ld\n",
                GetLastError());

        timeEndPeriod(data->period);
        exit(1);
    }
}

#endif /* _WIN32 */

static void alarm_timer_on_change_state_rearm(void *opaque, int running, int reason)
{
    if (running)
        qemu_rearm_alarm_timer((struct qemu_alarm_timer *) opaque);
}

int init_timer_alarm(void)
{
    struct qemu_alarm_timer *t = NULL;
    int i, err = -1;

    for (i = 0; alarm_timers[i].name; i++) {
        t = &alarm_timers[i];

        err = t->start(t);
        if (!err)
            break;
    }

    if (err) {
        err = -ENOENT;
        goto fail;
    }

    /* first event is at time 0 */
    t->pending = 1;
    alarm_timer = t;
    qemu_add_vm_change_state_handler(alarm_timer_on_change_state_rearm, t);

    return 0;

fail:
    return err;
}

void quit_timers(void)
{
    struct qemu_alarm_timer *t = alarm_timer;
    alarm_timer = NULL;
    t->stop(t);
}

int qemu_calculate_timeout(void)
{
#ifndef CONFIG_IOTHREAD
    int timeout;

    if (!vm_running)
        timeout = 5000;
    else {
        /* XXX: use timeout computed from timers */
        int64_t add;
        int64_t delta;
        /* Advance virtual time to the next event. */
        delta = qemu_icount_delta();
        if (delta > 0) {
            /* If virtual time is ahead of real time then just
               wait for IO. */
            timeout = (delta + 999999) / 1000000;
        } else {
            /* Wait for either IO to occur or the next
               timer event. */
            add = qemu_next_deadline();
            /* We advance the timer before checking for IO.
               Limit the amount we advance so that early IO
               activity won't get the guest too far ahead. */
            if (add > 10000000)
                add = 10000000;
            delta += add;
            qemu_icount += qemu_icount_round (add);
            timeout = delta / 1000000;
            if (timeout < 0)
                timeout = 0;
        }
    }

    return timeout;
#else /* CONFIG_IOTHREAD */
    return 1000;
#endif
}