/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysemu.h"
#include "net.h"
#include "monitor.h"
#include "console.h"

#include "hw/hw.h"

#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <errno.h>
#include <sys/time.h>
#include <signal.h>
#ifdef __FreeBSD__
#include <sys/param.h>
#endif

#ifdef __linux__
#include <sys/ioctl.h>
#include <linux/rtc.h>
/* For the benefit of older Linux systems that don't supply it,
   we use a local copy of hpet.h. */
/* #include <linux/hpet.h> */
#include "hpet.h"
#endif

#ifdef _WIN32
#include <windows.h>
#include <mmsystem.h>
#endif

#include "qemu-timer.h"

/* Conversion factor from emulated instructions to virtual clock ticks. */
int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
/* Compensate for varying guest execution speed. */
int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;

/***********************************************************/
/* guest cycle counter */

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

TimersState timers_state;

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non-increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
static int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

#ifndef CONFIG_IOTHREAD
static int64_t qemu_icount_delta(void)
{
    if (!use_icount) {
        return 5000 * (int64_t) 1000000;
    } else if (use_icount == 1) {
        /* When not using an adaptive execution frequency
           we tend to get badly out of sync with real time,
           so just delay for a reasonable amount of time. */
        return 0;
    } else {
        return cpu_get_icount() - cpu_get_clock();
    }
}
#endif

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks(): the clock is stopped. You must not call
   cpu_get_ticks() after that. */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}
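
/*
 * Descriptive note on stop/restart: while ticks are disabled,
 * cpu_get_ticks()/cpu_get_clock() keep returning the values frozen by
 * cpu_disable_ticks(); cpu_enable_ticks() then subtracts the current
 * host counters from the offsets, so the guest-visible counters resume
 * from where they stopped instead of jumping by the time spent stopped.
 */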

/***********************************************************/
/* timers */

#define QEMU_CLOCK_REALTIME 0
#define QEMU_CLOCK_VIRTUAL  1
#define QEMU_CLOCK_HOST     2

struct QEMUClock {
    int type;
    int enabled;
    /* XXX: add frequency */
};

struct QEMUTimer {
    QEMUClock *clock;
    int64_t expire_time;
    QEMUTimerCB *cb;
    void *opaque;
    struct QEMUTimer *next;
};

struct qemu_alarm_timer {
    char const *name;
    int (*start)(struct qemu_alarm_timer *t);
    void (*stop)(struct qemu_alarm_timer *t);
    void (*rearm)(struct qemu_alarm_timer *t);
    void *priv;

    char expired;
    char pending;
};

static struct qemu_alarm_timer *alarm_timer;

int qemu_alarm_pending(void)
{
    return alarm_timer->pending;
}

static inline int alarm_has_dynticks(struct qemu_alarm_timer *t)
{
    return !!t->rearm;
}

static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
{
    if (!alarm_has_dynticks(t))
        return;

    t->rearm(t);
}

/* TODO: MIN_TIMER_REARM_US should be optimized */
#define MIN_TIMER_REARM_US 250

#ifdef _WIN32

struct qemu_alarm_win32 {
    MMRESULT timerId;
    unsigned int period;
} alarm_win32_data = {0, 0};

static int win32_start_timer(struct qemu_alarm_timer *t);
static void win32_stop_timer(struct qemu_alarm_timer *t);
static void win32_rearm_timer(struct qemu_alarm_timer *t);

#else

static int unix_start_timer(struct qemu_alarm_timer *t);
static void unix_stop_timer(struct qemu_alarm_timer *t);

#ifdef __linux__

static int dynticks_start_timer(struct qemu_alarm_timer *t);
static void dynticks_stop_timer(struct qemu_alarm_timer *t);
static void dynticks_rearm_timer(struct qemu_alarm_timer *t);

static int hpet_start_timer(struct qemu_alarm_timer *t);
static void hpet_stop_timer(struct qemu_alarm_timer *t);

static int rtc_start_timer(struct qemu_alarm_timer *t);
static void rtc_stop_timer(struct qemu_alarm_timer *t);

#endif /* __linux__ */

#endif /* _WIN32 */

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing. */
    if (!vm_running)
        return;

    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead. Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind. Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void * opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void * opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}

int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}
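
/*
 * qemu_icount_round() is a ceiling division by 2^icount_time_shift:
 * roughly, it converts a vm_clock interval into the number of
 * instruction units that cover it, rounding up.  With
 * icount_time_shift == 3, for example, an interval of 9 rounds up to
 * 2 ((9 + 7) >> 3), not 1.
 */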

static struct qemu_alarm_timer alarm_timers[] = {
#ifndef _WIN32
#ifdef __linux__
    {"dynticks", dynticks_start_timer,
     dynticks_stop_timer, dynticks_rearm_timer, NULL},
    /* HPET - if available - is preferred */
    {"hpet", hpet_start_timer, hpet_stop_timer, NULL, NULL},
    /* ...otherwise try RTC */
    {"rtc", rtc_start_timer, rtc_stop_timer, NULL, NULL},
#endif
    {"unix", unix_start_timer, unix_stop_timer, NULL, NULL},
#else
    {"dynticks", win32_start_timer,
     win32_stop_timer, win32_rearm_timer, &alarm_win32_data},
    {"win32", win32_start_timer,
     win32_stop_timer, NULL, &alarm_win32_data},
#endif
    {NULL, }
};

static void show_available_alarms(void)
{
    int i;

    printf("Available alarm timers, in order of precedence:\n");
    for (i = 0; alarm_timers[i].name; i++)
        printf("%s\n", alarm_timers[i].name);
}

void configure_alarms(char const *opt)
{
    int i;
    int cur = 0;
    int count = ARRAY_SIZE(alarm_timers) - 1;
    char *arg;
    char *name;
    struct qemu_alarm_timer tmp;

    if (!strcmp(opt, "?")) {
        show_available_alarms();
        exit(0);
    }

    arg = qemu_strdup(opt);

    /* Reorder the array */
    name = strtok(arg, ",");
    while (name) {
        for (i = 0; i < count && alarm_timers[i].name; i++) {
            if (!strcmp(alarm_timers[i].name, name))
                break;
        }

        if (i == count) {
            fprintf(stderr, "Unknown clock %s\n", name);
            goto next;
        }

        if (i < cur)
            /* Ignore */
            goto next;

        /* Swap */
        tmp = alarm_timers[i];
        alarm_timers[i] = alarm_timers[cur];
        alarm_timers[cur] = tmp;

        cur++;
next:
        name = strtok(NULL, ",");
    }

    qemu_free(arg);

    if (cur) {
        /* Disable remaining timers */
        for (i = cur; i < count; i++)
            alarm_timers[i].name = NULL;
    } else {
        show_available_alarms();
        exit(1);
    }
}
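
/*
 * The argument to configure_alarms() is a comma-separated list of the
 * names above, e.g. "hpet,dynticks": the listed timers are moved to the
 * front of alarm_timers[] in the given order and the remaining entries
 * are disabled.  Passing "?" prints the available timers and exits.
 */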

#define QEMU_NUM_CLOCKS 3

QEMUClock *rt_clock;
QEMUClock *vm_clock;
QEMUClock *host_clock;

static QEMUTimer *active_timers[QEMU_NUM_CLOCKS];

static QEMUClock *qemu_new_clock(int type)
{
    QEMUClock *clock;
    clock = qemu_mallocz(sizeof(QEMUClock));
    clock->type = type;
    clock->enabled = 1;
    return clock;
}

void qemu_clock_enable(QEMUClock *clock, int enabled)
{
    clock->enabled = enabled;
}

QEMUTimer *qemu_new_timer(QEMUClock *clock, QEMUTimerCB *cb, void *opaque)
{
    QEMUTimer *ts;

    ts = qemu_mallocz(sizeof(QEMUTimer));
    ts->clock = clock;
    ts->cb = cb;
    ts->opaque = opaque;
    return ts;
}

void qemu_free_timer(QEMUTimer *ts)
{
    qemu_free(ts);
}

/* stop a timer, but do not dealloc it */
void qemu_del_timer(QEMUTimer *ts)
{
    QEMUTimer **pt, *t;

    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    pt = &active_timers[ts->clock->type];
    for(;;) {
        t = *pt;
        if (!t)
            break;
        if (t == ts) {
            *pt = t->next;
            break;
        }
        pt = &t->next;
    }
}

/* Modify the given timer so that it fires when current_time >=
   expire_time; the corresponding callback will then be called. */
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time)
{
    QEMUTimer **pt, *t;

    qemu_del_timer(ts);

    /* add the timer in the sorted list */
    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    pt = &active_timers[ts->clock->type];
    for(;;) {
        t = *pt;
        if (!t)
            break;
        if (t->expire_time > expire_time)
            break;
        pt = &t->next;
    }
    ts->expire_time = expire_time;
    ts->next = *pt;
    *pt = ts;

    /* Rearm if necessary */
    if (pt == &active_timers[ts->clock->type]) {
        if (!alarm_timer->pending) {
            qemu_rearm_alarm_timer(alarm_timer);
        }
        /* Interrupt execution to force deadline recalculation. */
        if (use_icount)
            qemu_notify_event();
    }
}
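
/*
 * Typical usage sketch (my_cb and opaque are placeholder names): create
 * a timer on a clock, then arm it relative to that clock's current
 * value, mirroring what configure_icount() does below:
 *
 *     QEMUTimer *t = qemu_new_timer(vm_clock, my_cb, opaque);
 *     qemu_mod_timer(t, qemu_get_clock(vm_clock) + get_ticks_per_sec());
 *
 * The timer fires once; for periodic behaviour the callback re-arms
 * itself with qemu_mod_timer(), as icount_adjust_rt()/_vm() do above.
 */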

int qemu_timer_pending(QEMUTimer *ts)
{
    QEMUTimer *t;
    for(t = active_timers[ts->clock->type]; t != NULL; t = t->next) {
        if (t == ts)
            return 1;
    }
    return 0;
}

int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time)
{
    if (!timer_head)
        return 0;
    return (timer_head->expire_time <= current_time);
}

static void qemu_run_timers(QEMUClock *clock)
{
    QEMUTimer **ptimer_head, *ts;
    int64_t current_time;

    if (!clock->enabled)
        return;

    current_time = qemu_get_clock (clock);
    ptimer_head = &active_timers[clock->type];
    for(;;) {
        ts = *ptimer_head;
        if (!ts || ts->expire_time > current_time)
            break;
        /* remove timer from the list before calling the callback */
        *ptimer_head = ts->next;
        ts->next = NULL;

        /* run the callback (the timer list can be modified) */
        ts->cb(ts->opaque);
    }
}

int64_t qemu_get_clock(QEMUClock *clock)
{
    switch(clock->type) {
    case QEMU_CLOCK_REALTIME:
        return get_clock() / 1000000;
    default:
    case QEMU_CLOCK_VIRTUAL:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_clock();
        }
    case QEMU_CLOCK_HOST:
        return get_clock_realtime();
    }
}

int64_t qemu_get_clock_ns(QEMUClock *clock)
{
    switch(clock->type) {
    case QEMU_CLOCK_REALTIME:
        return get_clock();
    default:
    case QEMU_CLOCK_VIRTUAL:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_clock();
        }
    case QEMU_CLOCK_HOST:
        return get_clock_realtime();
    }
}
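
/*
 * Note on units (assuming get_clock() and get_clock_realtime() from
 * qemu-timer.h count in nanoseconds): qemu_get_clock() returns
 * milliseconds for rt_clock but nanoseconds for vm_clock and
 * host_clock, while qemu_get_clock_ns() returns nanoseconds for every
 * clock.
 */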

void init_clocks(void)
{
    rt_clock = qemu_new_clock(QEMU_CLOCK_REALTIME);
    vm_clock = qemu_new_clock(QEMU_CLOCK_VIRTUAL);
    host_clock = qemu_new_clock(QEMU_CLOCK_HOST);

    rtc_clock = host_clock;
}

/* save a timer */
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    if (qemu_timer_pending(ts)) {
        expire_time = ts->expire_time;
    } else {
        expire_time = -1;
    }
    qemu_put_be64(f, expire_time);
}

void qemu_get_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = qemu_get_be64(f);
    if (expire_time != -1) {
        qemu_mod_timer(ts, expire_time);
    } else {
        qemu_del_timer(ts);
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option)
        return;

    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = qemu_new_timer(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10);
}

void qemu_run_all_timers(void)
{
    alarm_timer->pending = 0;

    /* rearm timer, if not periodic */
    if (alarm_timer->expired) {
        alarm_timer->expired = 0;
        qemu_rearm_alarm_timer(alarm_timer);
    }

    /* vm time timers */
    if (vm_running) {
        qemu_run_timers(vm_clock);
    }

    qemu_run_timers(rt_clock);
    qemu_run_timers(host_clock);
}
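
/*
 * qemu_run_all_timers() is the main-loop side of the alarm: the signal
 * handler (or win32 callback) below only sets alarm_timer->pending and
 * kicks the event loop via qemu_notify_event(); the expired timers'
 * callbacks run here, and vm_clock timers are skipped while the VM is
 * stopped.
 */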

#ifdef _WIN32
static void CALLBACK host_alarm_handler(UINT uTimerID, UINT uMsg,
                                        DWORD_PTR dwUser, DWORD_PTR dw1,
                                        DWORD_PTR dw2)
#else
static void host_alarm_handler(int host_signum)
#endif
{
    struct qemu_alarm_timer *t = alarm_timer;
    if (!t)
        return;

#if 0
#define DISP_FREQ 1000
    {
        static int64_t delta_min = INT64_MAX;
        static int64_t delta_max, delta_cum, last_clock, delta, ti;
        static int count;
        ti = qemu_get_clock(vm_clock);
        if (last_clock != 0) {
            delta = ti - last_clock;
            if (delta < delta_min)
                delta_min = delta;
            if (delta > delta_max)
                delta_max = delta;
            delta_cum += delta;
            if (++count == DISP_FREQ) {
                printf("timer: min=%" PRId64 " us max=%" PRId64 " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n",
                       muldiv64(delta_min, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_max, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_cum, 1000000 / DISP_FREQ, get_ticks_per_sec()),
                       (double)get_ticks_per_sec() / ((double)delta_cum / DISP_FREQ));
                count = 0;
                delta_min = INT64_MAX;
                delta_max = 0;
                delta_cum = 0;
            }
        }
        last_clock = ti;
    }
#endif
    if (alarm_has_dynticks(t) ||
        (!use_icount &&
         qemu_timer_expired(active_timers[QEMU_CLOCK_VIRTUAL],
                            qemu_get_clock(vm_clock))) ||
        qemu_timer_expired(active_timers[QEMU_CLOCK_REALTIME],
                           qemu_get_clock(rt_clock)) ||
        qemu_timer_expired(active_timers[QEMU_CLOCK_HOST],
                           qemu_get_clock(host_clock))) {

        t->expired = alarm_has_dynticks(t);
        t->pending = 1;
        qemu_notify_event();
    }
}

int64_t qemu_next_deadline(void)
{
    /* To avoid problems with overflow, limit this to INT32_MAX. */
    int64_t delta = INT32_MAX;

    if (active_timers[QEMU_CLOCK_VIRTUAL]) {
        delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time -
                qemu_get_clock(vm_clock);
    }
    if (active_timers[QEMU_CLOCK_HOST]) {
        int64_t hdelta = active_timers[QEMU_CLOCK_HOST]->expire_time -
                         qemu_get_clock(host_clock);
        if (hdelta < delta)
            delta = hdelta;
    }

    if (delta < 0)
        delta = 0;

    return delta;
}
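
/*
 * qemu_next_deadline() returns the time until the next vm_clock or
 * host_clock timer in those clocks' own units (nanoseconds), clamped to
 * the range [0, INT32_MAX] so later arithmetic cannot overflow.
 */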

#ifndef _WIN32

#if defined(__linux__)

#define RTC_FREQ 1024

static uint64_t qemu_next_deadline_dyntick(void)
{
    int64_t delta;
    int64_t rtdelta;

    if (use_icount)
        delta = INT32_MAX;
    else
        delta = (qemu_next_deadline() + 999) / 1000;

    if (active_timers[QEMU_CLOCK_REALTIME]) {
        rtdelta = (active_timers[QEMU_CLOCK_REALTIME]->expire_time -
                   qemu_get_clock(rt_clock))*1000;
        if (rtdelta < delta)
            delta = rtdelta;
    }

    if (delta < MIN_TIMER_REARM_US)
        delta = MIN_TIMER_REARM_US;

    return delta;
}
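
/*
 * Unit conversion above: vm/host deadlines (nanoseconds) are rounded up
 * to microseconds with (x + 999) / 1000, rt_clock deadlines
 * (milliseconds) are scaled by 1000, and the result is floored at
 * MIN_TIMER_REARM_US so the host timer is never armed with an
 * unreasonably short period.
 */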

static void enable_sigio_timer(int fd)
{
    struct sigaction act;

    /* timer signal */
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGIO, &act, NULL);
    fcntl_setfl(fd, O_ASYNC);
    fcntl(fd, F_SETOWN, getpid());
}

static int hpet_start_timer(struct qemu_alarm_timer *t)
{
    struct hpet_info info;
    int r, fd;

    fd = qemu_open("/dev/hpet", O_RDONLY);
    if (fd < 0)
        return -1;

    /* Set frequency */
    r = ioctl(fd, HPET_IRQFREQ, RTC_FREQ);
    if (r < 0) {
        fprintf(stderr, "Could not configure '/dev/hpet' to have a 1024Hz timer. This is not a fatal\n"
                "error, but for better emulation accuracy type:\n"
                "'echo 1024 > /proc/sys/dev/hpet/max-user-freq' as root.\n");
        goto fail;
    }

    /* Check capabilities */
    r = ioctl(fd, HPET_INFO, &info);
    if (r < 0)
        goto fail;

    /* Enable periodic mode */
    r = ioctl(fd, HPET_EPI, 0);
    if (info.hi_flags && (r < 0))
        goto fail;

    /* Enable interrupt */
    r = ioctl(fd, HPET_IE_ON, 0);
    if (r < 0)
        goto fail;

    enable_sigio_timer(fd);
    t->priv = (void *)(long)fd;

    return 0;
fail:
    close(fd);
    return -1;
}

static void hpet_stop_timer(struct qemu_alarm_timer *t)
{
    int fd = (long)t->priv;

    close(fd);
}

static int rtc_start_timer(struct qemu_alarm_timer *t)
{
    int rtc_fd;
    unsigned long current_rtc_freq = 0;

    TFR(rtc_fd = qemu_open("/dev/rtc", O_RDONLY));
    if (rtc_fd < 0)
        return -1;
    ioctl(rtc_fd, RTC_IRQP_READ, &current_rtc_freq);
    if (current_rtc_freq != RTC_FREQ &&
        ioctl(rtc_fd, RTC_IRQP_SET, RTC_FREQ) < 0) {
        fprintf(stderr, "Could not configure '/dev/rtc' to have a 1024 Hz timer. This is not a fatal\n"
                "error, but for better emulation accuracy either use a 2.6 host Linux kernel or\n"
                "type 'echo 1024 > /proc/sys/dev/rtc/max-user-freq' as root.\n");
        goto fail;
    }
    if (ioctl(rtc_fd, RTC_PIE_ON, 0) < 0) {
    fail:
        close(rtc_fd);
        return -1;
    }

    enable_sigio_timer(rtc_fd);

    t->priv = (void *)(long)rtc_fd;

    return 0;
}

static void rtc_stop_timer(struct qemu_alarm_timer *t)
{
    int rtc_fd = (long)t->priv;

    close(rtc_fd);
}

static int dynticks_start_timer(struct qemu_alarm_timer *t)
{
    struct sigevent ev;
    timer_t host_timer;
    struct sigaction act;

    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    /*
     * Initialize ev struct to 0 to avoid valgrind complaining
     * about uninitialized data in timer_create call
     */
    memset(&ev, 0, sizeof(ev));
    ev.sigev_value.sival_int = 0;
    ev.sigev_notify = SIGEV_SIGNAL;
    ev.sigev_signo = SIGALRM;

    if (timer_create(CLOCK_REALTIME, &ev, &host_timer)) {
        perror("timer_create");

        /* disable dynticks */
        fprintf(stderr, "Dynamic Ticks disabled\n");

        return -1;
    }

    t->priv = (void *)(long)host_timer;

    return 0;
}

static void dynticks_stop_timer(struct qemu_alarm_timer *t)
{
    timer_t host_timer = (timer_t)(long)t->priv;

    timer_delete(host_timer);
}

static void dynticks_rearm_timer(struct qemu_alarm_timer *t)
{
    timer_t host_timer = (timer_t)(long)t->priv;
    struct itimerspec timeout;
    int64_t nearest_delta_us = INT64_MAX;
    int64_t current_us;

    assert(alarm_has_dynticks(t));
    if (!active_timers[QEMU_CLOCK_REALTIME] &&
        !active_timers[QEMU_CLOCK_VIRTUAL] &&
        !active_timers[QEMU_CLOCK_HOST])
        return;

    nearest_delta_us = qemu_next_deadline_dyntick();

    /* check whether a timer is already running */
    if (timer_gettime(host_timer, &timeout)) {
        perror("gettime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
    current_us = timeout.it_value.tv_sec * 1000000 + timeout.it_value.tv_nsec/1000;
    if (current_us && current_us <= nearest_delta_us)
        return;

    timeout.it_interval.tv_sec = 0;
    timeout.it_interval.tv_nsec = 0; /* 0 for one-shot timer */
    timeout.it_value.tv_sec = nearest_delta_us / 1000000;
    timeout.it_value.tv_nsec = (nearest_delta_us % 1000000) * 1000;
    if (timer_settime(host_timer, 0 /* RELATIVE */, &timeout, NULL)) {
        perror("settime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
}
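
/*
 * The microsecond delay above is split into struct itimerspec fields:
 * e.g. 1500 us becomes it_value.tv_sec = 0 and it_value.tv_nsec =
 * 1500000.  A zero it_interval makes the POSIX timer one-shot, so it is
 * re-armed later via qemu_rearm_alarm_timer().
 */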

#endif /* defined(__linux__) */

static int unix_start_timer(struct qemu_alarm_timer *t)
{
    struct sigaction act;
    struct itimerval itv;
    int err;

    /* timer signal */
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    itv.it_interval.tv_sec = 0;
    /* for i386 kernel 2.6 to get 1 ms */
    itv.it_interval.tv_usec = 999;
    itv.it_value.tv_sec = 0;
    itv.it_value.tv_usec = 10 * 1000;

    err = setitimer(ITIMER_REAL, &itv, NULL);
    if (err)
        return -1;

    return 0;
}

static void unix_stop_timer(struct qemu_alarm_timer *t)
{
    struct itimerval itv;

    memset(&itv, 0, sizeof(itv));
    setitimer(ITIMER_REAL, &itv, NULL);
}

#endif /* !defined(_WIN32) */


#ifdef _WIN32

static int win32_start_timer(struct qemu_alarm_timer *t)
{
    TIMECAPS tc;
    struct qemu_alarm_win32 *data = t->priv;
    UINT flags;

    memset(&tc, 0, sizeof(tc));
    timeGetDevCaps(&tc, sizeof(tc));

    data->period = tc.wPeriodMin;
    timeBeginPeriod(data->period);

    flags = TIME_CALLBACK_FUNCTION;
    if (alarm_has_dynticks(t))
        flags |= TIME_ONESHOT;
    else
        flags |= TIME_PERIODIC;

    data->timerId = timeSetEvent(1,                  // interval (ms)
                                 data->period,       // resolution
                                 host_alarm_handler, // function
                                 (DWORD)t,           // parameter
                                 flags);

    if (!data->timerId) {
        fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
                GetLastError());
        timeEndPeriod(data->period);
        return -1;
    }

    return 0;
}

static void win32_stop_timer(struct qemu_alarm_timer *t)
{
    struct qemu_alarm_win32 *data = t->priv;

    timeKillEvent(data->timerId);
    timeEndPeriod(data->period);
}

static void win32_rearm_timer(struct qemu_alarm_timer *t)
{
    struct qemu_alarm_win32 *data = t->priv;

    assert(alarm_has_dynticks(t));
    if (!active_timers[QEMU_CLOCK_REALTIME] &&
        !active_timers[QEMU_CLOCK_VIRTUAL] &&
        !active_timers[QEMU_CLOCK_HOST])
        return;

    timeKillEvent(data->timerId);

    data->timerId = timeSetEvent(1,
                                 data->period,
                                 host_alarm_handler,
                                 (DWORD)t,
                                 TIME_ONESHOT | TIME_CALLBACK_FUNCTION);

    if (!data->timerId) {
        fprintf(stderr, "Failed to re-arm win32 alarm timer %ld\n",
                GetLastError());

        timeEndPeriod(data->period);
        exit(1);
    }
}

#endif /* _WIN32 */

static void alarm_timer_on_change_state_rearm(void *opaque, int running, int reason)
{
    if (running)
        qemu_rearm_alarm_timer((struct qemu_alarm_timer *) opaque);
}

int init_timer_alarm(void)
{
    struct qemu_alarm_timer *t = NULL;
    int i, err = -1;

    for (i = 0; alarm_timers[i].name; i++) {
        t = &alarm_timers[i];

        err = t->start(t);
        if (!err)
            break;
    }

    if (err) {
        err = -ENOENT;
        goto fail;
    }

    /* first event is at time 0 */
    t->pending = 1;
    alarm_timer = t;
    qemu_add_vm_change_state_handler(alarm_timer_on_change_state_rearm, t);

    return 0;

fail:
    return err;
}

void quit_timers(void)
{
    struct qemu_alarm_timer *t = alarm_timer;
    alarm_timer = NULL;
    t->stop(t);
}

int qemu_calculate_timeout(void)
{
#ifndef CONFIG_IOTHREAD
    int timeout;

    if (!vm_running)
        timeout = 5000;
    else {
        /* XXX: use timeout computed from timers */
        int64_t add;
        int64_t delta;
        /* Advance virtual time to the next event. */
        delta = qemu_icount_delta();
        if (delta > 0) {
            /* If virtual time is ahead of real time then just
               wait for IO. */
            timeout = (delta + 999999) / 1000000;
        } else {
            /* Wait for either IO to occur or the next
               timer event. */
            add = qemu_next_deadline();
            /* We advance the timer before checking for IO.
               Limit the amount we advance so that early IO
               activity won't get the guest too far ahead. */
            if (add > 10000000)
                add = 10000000;
            delta += add;
            qemu_icount += qemu_icount_round (add);
            timeout = delta / 1000000;
            if (timeout < 0)
                timeout = 0;
        }
    }

    return timeout;
#else /* CONFIG_IOTHREAD */
    return 1000;
#endif
}