/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysemu.h"
#include "net.h"
#include "monitor.h"
#include "console.h"

#include "hw/hw.h"

#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <errno.h>
#include <sys/time.h>
#include <signal.h>
#ifdef __FreeBSD__
#include <sys/param.h>
#endif

#ifdef __linux__
#include <sys/ioctl.h>
#include <linux/rtc.h>
/* For the benefit of older linux systems which don't supply it,
   we use a local copy of hpet.h. */
/* #include <linux/hpet.h> */
#include "hpet.h"
#endif

#ifdef _WIN32
#include <windows.h>
#include <mmsystem.h>
#endif

#include "qemu-timer.h"

/* Conversion factor from emulated instructions to virtual clock ticks. */
int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
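/* For orientation: a shift of 3 charges 2^3 = 8 ns of virtual time per
   emulated instruction (roughly 125 MIPS); the maximum shift of 10 charges
   1024 ns per instruction, i.e. roughly 1 MIPS. */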
/* Compensate for varying guest execution speed. */
int64_t qemu_icount_bias;
static QEMUTimer *icount_rt_timer;
static QEMUTimer *icount_vm_timer;

/***********************************************************/
/* guest cycle counter */

typedef struct TimersState {
    int64_t cpu_ticks_prev;
    int64_t cpu_ticks_offset;
    int64_t cpu_clock_offset;
    int32_t cpu_ticks_enabled;
    int64_t dummy;
} TimersState;

TimersState timers_state;

/* return the host CPU cycle counter and handle stop/restart */
int64_t cpu_get_ticks(void)
{
    if (use_icount) {
        return cpu_get_icount();
    }
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_ticks_offset;
    } else {
        int64_t ticks;
        ticks = cpu_get_real_ticks();
        if (timers_state.cpu_ticks_prev > ticks) {
            /* Note: non-increasing ticks may happen if the host uses
               software suspend */
            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
        }
        timers_state.cpu_ticks_prev = ticks;
        return ticks + timers_state.cpu_ticks_offset;
    }
}

/* return the host CPU monotonic timer and handle stop/restart */
static int64_t cpu_get_clock(void)
{
    int64_t ti;
    if (!timers_state.cpu_ticks_enabled) {
        return timers_state.cpu_clock_offset;
    } else {
        ti = get_clock();
        return ti + timers_state.cpu_clock_offset;
    }
}

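/* Distance of virtual (icount) time ahead of host time, in ns, when adaptive
   icount is in use; fixed fallback values are returned otherwise.
   qemu_calculate_timeout() below uses this to bound how long the main loop
   may sleep. */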
static int64_t qemu_icount_delta(void)
{
    if (!use_icount) {
        return 5000 * (int64_t) 1000000;
    } else if (use_icount == 1) {
        /* When not using an adaptive execution frequency
           we tend to get badly out of sync with real time,
           so just delay for a reasonable amount of time. */
        return 0;
    } else {
        return cpu_get_icount() - cpu_get_clock();
    }
}

/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
{
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
}

/* disable cpu_get_ticks() : the clock is stopped. You must not call
   cpu_get_ticks() after that. */
void cpu_disable_ticks(void)
{
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset = cpu_get_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock();
        timers_state.cpu_ticks_enabled = 0;
    }
}

/***********************************************************/
/* timers */

#define QEMU_CLOCK_REALTIME 0
#define QEMU_CLOCK_VIRTUAL  1
#define QEMU_CLOCK_HOST     2

struct QEMUClock {
    int type;
    int enabled;
    /* XXX: add frequency */
};

struct QEMUTimer {
    QEMUClock *clock;
    int64_t expire_time;
    QEMUTimerCB *cb;
    void *opaque;
    struct QEMUTimer *next;
};

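/* A host alarm-timer backend.  start()/stop() bracket its lifetime; rearm(),
   when non-NULL, reprograms a one-shot ("dyntick") host timer for the next
   QEMU deadline.  'expired' and 'pending' are set from the host timer
   callback and consumed in qemu_run_all_timers(). */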
struct qemu_alarm_timer {
    char const *name;
    int (*start)(struct qemu_alarm_timer *t);
    void (*stop)(struct qemu_alarm_timer *t);
    void (*rearm)(struct qemu_alarm_timer *t);
    void *priv;

    char expired;
    char pending;
};

static struct qemu_alarm_timer *alarm_timer;

int qemu_alarm_pending(void)
{
    return alarm_timer->pending;
}

static inline int alarm_has_dynticks(struct qemu_alarm_timer *t)
{
    return !!t->rearm;
}

static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
{
    if (!alarm_has_dynticks(t))
        return;

    t->rearm(t);
}

/* TODO: MIN_TIMER_REARM_NS should be optimized */
#define MIN_TIMER_REARM_NS 250000

#ifdef _WIN32

struct qemu_alarm_win32 {
    MMRESULT timerId;
    unsigned int period;
} alarm_win32_data = {0, 0};

static int win32_start_timer(struct qemu_alarm_timer *t);
static void win32_stop_timer(struct qemu_alarm_timer *t);
static void win32_rearm_timer(struct qemu_alarm_timer *t);

#else

static int unix_start_timer(struct qemu_alarm_timer *t);
static void unix_stop_timer(struct qemu_alarm_timer *t);

#ifdef __linux__

static int dynticks_start_timer(struct qemu_alarm_timer *t);
static void dynticks_stop_timer(struct qemu_alarm_timer *t);
static void dynticks_rearm_timer(struct qemu_alarm_timer *t);

static int hpet_start_timer(struct qemu_alarm_timer *t);
static void hpet_stop_timer(struct qemu_alarm_timer *t);

static int rtc_start_timer(struct qemu_alarm_timer *t);
static void rtc_stop_timer(struct qemu_alarm_timer *t);

#endif /* __linux__ */

#endif /* _WIN32 */

/* Correlation between real and virtual time is always going to be
   fairly approximate, so ignore small variation.
   When the guest is idle real and virtual time will be aligned in
   the IO wait loop. */
#define ICOUNT_WOBBLE (get_ticks_per_sec() / 10)

static void icount_adjust(void)
{
    int64_t cur_time;
    int64_t cur_icount;
    int64_t delta;
    static int64_t last_delta;
    /* If the VM is not running, then do nothing. */
    if (!vm_running)
        return;

    cur_time = cpu_get_clock();
    cur_icount = qemu_get_clock(vm_clock);
    delta = cur_icount - cur_time;
    /* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
    if (delta > 0
        && last_delta + ICOUNT_WOBBLE < delta * 2
        && icount_time_shift > 0) {
        /* The guest is getting too far ahead. Slow time down. */
        icount_time_shift--;
    }
    if (delta < 0
        && last_delta - ICOUNT_WOBBLE > delta * 2
        && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* The guest is getting too far behind. Speed time up. */
        icount_time_shift++;
    }
    last_delta = delta;
    qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
}

static void icount_adjust_rt(void * opaque)
{
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock(rt_clock) + 1000);
    icount_adjust();
}

static void icount_adjust_vm(void * opaque)
{
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10);
    icount_adjust();
}

int64_t qemu_icount_round(int64_t count)
{
    return (count + (1 << icount_time_shift) - 1) >> icount_time_shift;
}

static struct qemu_alarm_timer alarm_timers[] = {
#ifndef _WIN32
#ifdef __linux__
    {"dynticks", dynticks_start_timer,
     dynticks_stop_timer, dynticks_rearm_timer, NULL},
    /* HPET - if available - is preferred */
    {"hpet", hpet_start_timer, hpet_stop_timer, NULL, NULL},
    /* ...otherwise try RTC */
    {"rtc", rtc_start_timer, rtc_stop_timer, NULL, NULL},
#endif
    {"unix", unix_start_timer, unix_stop_timer, NULL, NULL},
#else
    {"dynticks", win32_start_timer,
     win32_stop_timer, win32_rearm_timer, &alarm_win32_data},
    {"win32", win32_start_timer,
     win32_stop_timer, NULL, &alarm_win32_data},
#endif
    {NULL, }
};

static void show_available_alarms(void)
{
    int i;

    printf("Available alarm timers, in order of precedence:\n");
    for (i = 0; alarm_timers[i].name; i++)
        printf("%s\n", alarm_timers[i].name);
}

void configure_alarms(char const *opt)
{
    int i;
    int cur = 0;
    int count = ARRAY_SIZE(alarm_timers) - 1;
    char *arg;
    char *name;
    struct qemu_alarm_timer tmp;

    if (!strcmp(opt, "?")) {
        show_available_alarms();
        exit(0);
    }

    arg = qemu_strdup(opt);

    /* Reorder the array */
    name = strtok(arg, ",");
    while (name) {
        for (i = 0; i < count && alarm_timers[i].name; i++) {
            if (!strcmp(alarm_timers[i].name, name))
                break;
        }

        if (i == count) {
            fprintf(stderr, "Unknown clock %s\n", name);
            goto next;
        }

        if (i < cur)
            /* Ignore */
            goto next;

        /* Swap */
        tmp = alarm_timers[i];
        alarm_timers[i] = alarm_timers[cur];
        alarm_timers[cur] = tmp;

        cur++;
next:
        name = strtok(NULL, ",");
    }

    qemu_free(arg);

    if (cur) {
        /* Disable remaining timers */
        for (i = cur; i < count; i++)
            alarm_timers[i].name = NULL;
    } else {
        show_available_alarms();
        exit(1);
    }
}

#define QEMU_NUM_CLOCKS 3

QEMUClock *rt_clock;
QEMUClock *vm_clock;
QEMUClock *host_clock;

static QEMUTimer *active_timers[QEMU_NUM_CLOCKS];

static QEMUClock *qemu_new_clock(int type)
{
    QEMUClock *clock;
    clock = qemu_mallocz(sizeof(QEMUClock));
    clock->type = type;
    clock->enabled = 1;
    return clock;
}

void qemu_clock_enable(QEMUClock *clock, int enabled)
{
    clock->enabled = enabled;
}

QEMUTimer *qemu_new_timer(QEMUClock *clock, QEMUTimerCB *cb, void *opaque)
{
    QEMUTimer *ts;

    ts = qemu_mallocz(sizeof(QEMUTimer));
    ts->clock = clock;
    ts->cb = cb;
    ts->opaque = opaque;
    return ts;
}

void qemu_free_timer(QEMUTimer *ts)
{
    qemu_free(ts);
}

/* stop a timer, but do not dealloc it */
void qemu_del_timer(QEMUTimer *ts)
{
    QEMUTimer **pt, *t;

    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    pt = &active_timers[ts->clock->type];
    for(;;) {
        t = *pt;
        if (!t)
            break;
        if (t == ts) {
            *pt = t->next;
            break;
        }
        pt = &t->next;
    }
}

/* modify the current timer so that it will be fired when current_time
   >= expire_time. The corresponding callback will be called. */
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time)
{
    QEMUTimer **pt, *t;

    qemu_del_timer(ts);

    /* add the timer in the sorted list */
    /* NOTE: this code must be signal safe because
       qemu_timer_expired() can be called from a signal. */
    pt = &active_timers[ts->clock->type];
    for(;;) {
        t = *pt;
        if (!t)
            break;
        if (t->expire_time > expire_time)
            break;
        pt = &t->next;
    }
    ts->expire_time = expire_time;
    ts->next = *pt;
    *pt = ts;

    /* Rearm if necessary */
    if (pt == &active_timers[ts->clock->type]) {
        if (!alarm_timer->pending) {
            qemu_rearm_alarm_timer(alarm_timer);
        }
        /* Interrupt execution to force deadline recalculation. */
        if (use_icount)
            qemu_notify_event();
    }
}

int qemu_timer_pending(QEMUTimer *ts)
{
    QEMUTimer *t;
    for(t = active_timers[ts->clock->type]; t != NULL; t = t->next) {
        if (t == ts)
            return 1;
    }
    return 0;
}

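/* Return nonzero if the head of a (sorted) active timer list has a deadline
   at or before current_time. */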
int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time)
{
    if (!timer_head)
        return 0;
    return (timer_head->expire_time <= current_time);
}

static void qemu_run_timers(QEMUClock *clock)
{
    QEMUTimer **ptimer_head, *ts;
    int64_t current_time;

    if (!clock->enabled)
        return;

    current_time = qemu_get_clock (clock);
    ptimer_head = &active_timers[clock->type];
    for(;;) {
        ts = *ptimer_head;
        if (!ts || ts->expire_time > current_time)
            break;
        /* remove timer from the list before calling the callback */
        *ptimer_head = ts->next;
        ts->next = NULL;

        /* run the callback (the timer list can be modified) */
        ts->cb(ts->opaque);
    }
}

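/* Note the asymmetric units: qemu_get_clock() returns milliseconds for the
   realtime clock but nanoseconds for the virtual and host clocks, while
   qemu_get_clock_ns() below returns nanoseconds for every clock type. */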
int64_t qemu_get_clock(QEMUClock *clock)
{
    switch(clock->type) {
    case QEMU_CLOCK_REALTIME:
        return get_clock() / 1000000;
    default:
    case QEMU_CLOCK_VIRTUAL:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_clock();
        }
    case QEMU_CLOCK_HOST:
        return get_clock_realtime();
    }
}

int64_t qemu_get_clock_ns(QEMUClock *clock)
{
    switch(clock->type) {
    case QEMU_CLOCK_REALTIME:
        return get_clock();
    default:
    case QEMU_CLOCK_VIRTUAL:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_clock();
        }
    case QEMU_CLOCK_HOST:
        return get_clock_realtime();
    }
}

void init_clocks(void)
{
    rt_clock = qemu_new_clock(QEMU_CLOCK_REALTIME);
    vm_clock = qemu_new_clock(QEMU_CLOCK_VIRTUAL);
    host_clock = qemu_new_clock(QEMU_CLOCK_HOST);

    rtc_clock = host_clock;
}

/* save a timer */
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    if (qemu_timer_pending(ts)) {
        expire_time = ts->expire_time;
    } else {
        expire_time = -1;
    }
    qemu_put_be64(f, expire_time);
}

void qemu_get_timer(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = qemu_get_be64(f);
    if (expire_time != -1) {
        qemu_mod_timer(ts, expire_time);
    } else {
        qemu_del_timer(ts);
    }
}

static const VMStateDescription vmstate_timers = {
    .name = "timer",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_INT64(cpu_ticks_offset, TimersState),
        VMSTATE_INT64(dummy, TimersState),
        VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
        VMSTATE_END_OF_LIST()
    }
};

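/* Configure instruction counting from the icount option string: a numeric
   argument selects a fixed shift (use_icount == 1); "auto" selects adaptive
   mode (use_icount == 2), driven by the realtime and virtual-time adjustment
   timers armed below. */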
void configure_icount(const char *option)
{
    vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
    if (!option)
        return;

    if (strcmp(option, "auto") != 0) {
        icount_time_shift = strtol(option, NULL, 0);
        use_icount = 1;
        return;
    }

    use_icount = 2;

    /* 125MIPS seems a reasonable initial guess at the guest speed.
       It will be corrected fairly quickly anyway. */
    icount_time_shift = 3;

    /* Have both realtime and virtual time triggers for speed adjustment.
       The realtime trigger catches emulated time passing too slowly,
       the virtual time trigger catches emulated time passing too fast.
       Realtime triggers occur even when idle, so use them less frequently
       than VM triggers. */
    icount_rt_timer = qemu_new_timer(rt_clock, icount_adjust_rt, NULL);
    qemu_mod_timer(icount_rt_timer,
                   qemu_get_clock(rt_clock) + 1000);
    icount_vm_timer = qemu_new_timer(vm_clock, icount_adjust_vm, NULL);
    qemu_mod_timer(icount_vm_timer,
                   qemu_get_clock(vm_clock) + get_ticks_per_sec() / 10);
}

void qemu_run_all_timers(void)
{
    alarm_timer->pending = 0;

    /* rearm timer, if not periodic */
    if (alarm_timer->expired) {
        alarm_timer->expired = 0;
        qemu_rearm_alarm_timer(alarm_timer);
    }

    /* vm time timers */
    if (vm_running) {
        qemu_run_timers(vm_clock);
    }

    qemu_run_timers(rt_clock);
    qemu_run_timers(host_clock);
}

static int64_t qemu_next_alarm_deadline(void);

#ifdef _WIN32
static void CALLBACK host_alarm_handler(UINT uTimerID, UINT uMsg,
                                        DWORD_PTR dwUser, DWORD_PTR dw1,
                                        DWORD_PTR dw2)
#else
static void host_alarm_handler(int host_signum)
#endif
{
    struct qemu_alarm_timer *t = alarm_timer;
    if (!t)
        return;

#if 0
#define DISP_FREQ 1000
    {
        static int64_t delta_min = INT64_MAX;
        static int64_t delta_max, delta_cum, last_clock, delta, ti;
        static int count;
        ti = qemu_get_clock(vm_clock);
        if (last_clock != 0) {
            delta = ti - last_clock;
            if (delta < delta_min)
                delta_min = delta;
            if (delta > delta_max)
                delta_max = delta;
            delta_cum += delta;
            if (++count == DISP_FREQ) {
                printf("timer: min=%" PRId64 " us max=%" PRId64 " us avg=%" PRId64 " us avg_freq=%0.3f Hz\n",
                       muldiv64(delta_min, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_max, 1000000, get_ticks_per_sec()),
                       muldiv64(delta_cum, 1000000 / DISP_FREQ, get_ticks_per_sec()),
                       (double)get_ticks_per_sec() / ((double)delta_cum / DISP_FREQ));
                count = 0;
                delta_min = INT64_MAX;
                delta_max = 0;
                delta_cum = 0;
            }
        }
        last_clock = ti;
    }
#endif
    if (alarm_has_dynticks(t) ||
        qemu_next_alarm_deadline () <= 0) {
        t->expired = alarm_has_dynticks(t);
        t->pending = 1;
        qemu_notify_event();
    }
}

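/* Nanoseconds until the next virtual or host clock deadline; defaults to
   INT32_MAX when no such timer is armed and never returns a negative value. */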
int64_t qemu_next_deadline(void)
{
    /* To avoid problems with overflow limit this to 2^32. */
    int64_t delta = INT32_MAX;

    if (active_timers[QEMU_CLOCK_VIRTUAL]) {
        delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time -
                qemu_get_clock_ns(vm_clock);
    }
    if (active_timers[QEMU_CLOCK_HOST]) {
        int64_t hdelta = active_timers[QEMU_CLOCK_HOST]->expire_time -
                         qemu_get_clock_ns(host_clock);
        if (hdelta < delta)
            delta = hdelta;
    }

    if (delta < 0)
        delta = 0;

    return delta;
}

#ifndef _WIN32

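/* Nanoseconds until the earliest timer armed on any clock; virtual clock
   timers are only considered when icount is not in use. */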
static int64_t qemu_next_alarm_deadline(void)
{
    int64_t delta;
    int64_t rtdelta;

    if (!use_icount && active_timers[QEMU_CLOCK_VIRTUAL]) {
        delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time -
                qemu_get_clock(vm_clock);
    } else {
        delta = INT32_MAX;
    }
    if (active_timers[QEMU_CLOCK_HOST]) {
        int64_t hdelta = active_timers[QEMU_CLOCK_HOST]->expire_time -
                         qemu_get_clock_ns(host_clock);
        if (hdelta < delta)
            delta = hdelta;
    }
    if (active_timers[QEMU_CLOCK_REALTIME]) {
        rtdelta = (active_timers[QEMU_CLOCK_REALTIME]->expire_time * 1000000 -
                   qemu_get_clock_ns(rt_clock));
        if (rtdelta < delta)
            delta = rtdelta;
    }

    return delta;
}

#if defined(__linux__)

#define RTC_FREQ 1024

static void enable_sigio_timer(int fd)
{
    struct sigaction act;

    /* timer signal */
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGIO, &act, NULL);
    fcntl_setfl(fd, O_ASYNC);
    fcntl(fd, F_SETOWN, getpid());
}

static int hpet_start_timer(struct qemu_alarm_timer *t)
{
    struct hpet_info info;
    int r, fd;

    fd = qemu_open("/dev/hpet", O_RDONLY);
    if (fd < 0)
        return -1;

    /* Set frequency */
    r = ioctl(fd, HPET_IRQFREQ, RTC_FREQ);
    if (r < 0) {
        fprintf(stderr, "Could not configure '/dev/hpet' to have a 1024Hz timer. This is not a fatal\n"
                "error, but for better emulation accuracy type:\n"
                "'echo 1024 > /proc/sys/dev/hpet/max-user-freq' as root.\n");
        goto fail;
    }

    /* Check capabilities */
    r = ioctl(fd, HPET_INFO, &info);
    if (r < 0)
        goto fail;

    /* Enable periodic mode */
    r = ioctl(fd, HPET_EPI, 0);
    if (info.hi_flags && (r < 0))
        goto fail;

    /* Enable interrupt */
    r = ioctl(fd, HPET_IE_ON, 0);
    if (r < 0)
        goto fail;

    enable_sigio_timer(fd);
    t->priv = (void *)(long)fd;

    return 0;
fail:
    close(fd);
    return -1;
}

static void hpet_stop_timer(struct qemu_alarm_timer *t)
{
    int fd = (long)t->priv;

    close(fd);
}

static int rtc_start_timer(struct qemu_alarm_timer *t)
{
    int rtc_fd;
    unsigned long current_rtc_freq = 0;

    TFR(rtc_fd = qemu_open("/dev/rtc", O_RDONLY));
    if (rtc_fd < 0)
        return -1;
    ioctl(rtc_fd, RTC_IRQP_READ, &current_rtc_freq);
    if (current_rtc_freq != RTC_FREQ &&
        ioctl(rtc_fd, RTC_IRQP_SET, RTC_FREQ) < 0) {
        fprintf(stderr, "Could not configure '/dev/rtc' to have a 1024 Hz timer. This is not a fatal\n"
                "error, but for better emulation accuracy either use a 2.6 host Linux kernel or\n"
                "type 'echo 1024 > /proc/sys/dev/rtc/max-user-freq' as root.\n");
        goto fail;
    }
    if (ioctl(rtc_fd, RTC_PIE_ON, 0) < 0) {
    fail:
        close(rtc_fd);
        return -1;
    }

    enable_sigio_timer(rtc_fd);

    t->priv = (void *)(long)rtc_fd;

    return 0;
}

static void rtc_stop_timer(struct qemu_alarm_timer *t)
{
    int rtc_fd = (long)t->priv;

    close(rtc_fd);
}

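/* "dynticks" backend: a one-shot POSIX timer delivering SIGALRM, reprogrammed
   by dynticks_rearm_timer() to fire at the next pending QEMU deadline. */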
static int dynticks_start_timer(struct qemu_alarm_timer *t)
{
    struct sigevent ev;
    timer_t host_timer;
    struct sigaction act;

    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    /*
     * Initialize ev struct to 0 to avoid valgrind complaining
     * about uninitialized data in timer_create call
     */
    memset(&ev, 0, sizeof(ev));
    ev.sigev_value.sival_int = 0;
    ev.sigev_notify = SIGEV_SIGNAL;
    ev.sigev_signo = SIGALRM;

    if (timer_create(CLOCK_REALTIME, &ev, &host_timer)) {
        perror("timer_create");

        /* disable dynticks */
        fprintf(stderr, "Dynamic Ticks disabled\n");

        return -1;
    }

    t->priv = (void *)(long)host_timer;

    return 0;
}

static void dynticks_stop_timer(struct qemu_alarm_timer *t)
{
    timer_t host_timer = (timer_t)(long)t->priv;

    timer_delete(host_timer);
}

static void dynticks_rearm_timer(struct qemu_alarm_timer *t)
{
    timer_t host_timer = (timer_t)(long)t->priv;
    struct itimerspec timeout;
    int64_t nearest_delta_ns = INT64_MAX;
    int64_t current_ns;

    assert(alarm_has_dynticks(t));
    if (!active_timers[QEMU_CLOCK_REALTIME] &&
        !active_timers[QEMU_CLOCK_VIRTUAL] &&
        !active_timers[QEMU_CLOCK_HOST])
        return;

    nearest_delta_ns = qemu_next_alarm_deadline();
    if (nearest_delta_ns < MIN_TIMER_REARM_NS)
        nearest_delta_ns = MIN_TIMER_REARM_NS;

    /* check whether a timer is already running */
    if (timer_gettime(host_timer, &timeout)) {
        perror("gettime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
    current_ns = timeout.it_value.tv_sec * 1000000000LL + timeout.it_value.tv_nsec;
    if (current_ns && current_ns <= nearest_delta_ns)
        return;

    timeout.it_interval.tv_sec = 0;
    timeout.it_interval.tv_nsec = 0; /* 0 for one-shot timer */
    timeout.it_value.tv_sec = nearest_delta_ns / 1000000000;
    timeout.it_value.tv_nsec = nearest_delta_ns % 1000000000;
    if (timer_settime(host_timer, 0 /* RELATIVE */, &timeout, NULL)) {
        perror("settime");
        fprintf(stderr, "Internal timer error: aborting\n");
        exit(1);
    }
}

#endif /* defined(__linux__) */

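/* Fallback "unix" backend: a periodic setitimer()-based SIGALRM at roughly
   1 ms resolution, with no rearm hook. */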
static int unix_start_timer(struct qemu_alarm_timer *t)
{
    struct sigaction act;
    struct itimerval itv;
    int err;

    /* timer signal */
    sigfillset(&act.sa_mask);
    act.sa_flags = 0;
    act.sa_handler = host_alarm_handler;

    sigaction(SIGALRM, &act, NULL);

    itv.it_interval.tv_sec = 0;
    /* for i386 kernel 2.6 to get 1 ms */
    itv.it_interval.tv_usec = 999;
    itv.it_value.tv_sec = 0;
    itv.it_value.tv_usec = 10 * 1000;

    err = setitimer(ITIMER_REAL, &itv, NULL);
    if (err)
        return -1;

    return 0;
}

static void unix_stop_timer(struct qemu_alarm_timer *t)
{
    struct itimerval itv;

    memset(&itv, 0, sizeof(itv));
    setitimer(ITIMER_REAL, &itv, NULL);
}

#endif /* !defined(_WIN32) */


#ifdef _WIN32

static int win32_start_timer(struct qemu_alarm_timer *t)
{
    TIMECAPS tc;
    struct qemu_alarm_win32 *data = t->priv;
    UINT flags;

    memset(&tc, 0, sizeof(tc));
    timeGetDevCaps(&tc, sizeof(tc));

    data->period = tc.wPeriodMin;
    timeBeginPeriod(data->period);

    flags = TIME_CALLBACK_FUNCTION;
    if (alarm_has_dynticks(t))
        flags |= TIME_ONESHOT;
    else
        flags |= TIME_PERIODIC;

    data->timerId = timeSetEvent(1,                  // interval (ms)
                                 data->period,       // resolution
                                 host_alarm_handler, // function
                                 (DWORD)t,           // parameter
                                 flags);

    if (!data->timerId) {
        fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
                GetLastError());
        timeEndPeriod(data->period);
        return -1;
    }

    return 0;
}

static void win32_stop_timer(struct qemu_alarm_timer *t)
{
    struct qemu_alarm_win32 *data = t->priv;

    timeKillEvent(data->timerId);
    timeEndPeriod(data->period);
}

static void win32_rearm_timer(struct qemu_alarm_timer *t)
{
    struct qemu_alarm_win32 *data = t->priv;

    assert(alarm_has_dynticks(t));
    if (!active_timers[QEMU_CLOCK_REALTIME] &&
        !active_timers[QEMU_CLOCK_VIRTUAL] &&
        !active_timers[QEMU_CLOCK_HOST])
        return;

    timeKillEvent(data->timerId);

    data->timerId = timeSetEvent(1,
                                 data->period,
                                 host_alarm_handler,
                                 (DWORD)t,
                                 TIME_ONESHOT | TIME_CALLBACK_FUNCTION);

    if (!data->timerId) {
        fprintf(stderr, "Failed to re-arm win32 alarm timer %ld\n",
                GetLastError());

        timeEndPeriod(data->period);
        exit(1);
    }
}

#endif /* _WIN32 */

static void alarm_timer_on_change_state_rearm(void *opaque, int running, int reason)
{
    if (running)
        qemu_rearm_alarm_timer((struct qemu_alarm_timer *) opaque);
}

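/* Probe the alarm timer backends in the order left by configure_alarms() and
   activate the first one that starts successfully; the initial tick is marked
   pending so timers are serviced immediately. */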
int init_timer_alarm(void)
{
    struct qemu_alarm_timer *t = NULL;
    int i, err = -1;

    for (i = 0; alarm_timers[i].name; i++) {
        t = &alarm_timers[i];

        err = t->start(t);
        if (!err)
            break;
    }

    if (err) {
        err = -ENOENT;
        goto fail;
    }

    /* first event is at time 0 */
    t->pending = 1;
    alarm_timer = t;
    qemu_add_vm_change_state_handler(alarm_timer_on_change_state_rearm, t);

    return 0;

fail:
    return err;
}

void quit_timers(void)
{
    struct qemu_alarm_timer *t = alarm_timer;
    alarm_timer = NULL;
    t->stop(t);
}

int qemu_calculate_timeout(void)
{
    int timeout;

#ifdef CONFIG_IOTHREAD
    /* When using icount, making forward progress with qemu_icount when the
       guest CPU is idle is critical. We only use the static io-thread timeout
       for non icount runs. */
    if (!use_icount) {
        return 1000;
    }
#endif

    if (!vm_running)
        timeout = 5000;
    else {
        /* XXX: use timeout computed from timers */
        int64_t add;
        int64_t delta;
        /* Advance virtual time to the next event. */
        delta = qemu_icount_delta();
        if (delta > 0) {
            /* If virtual time is ahead of real time then just
               wait for IO. */
            timeout = (delta + 999999) / 1000000;
        } else {
            /* Wait for either IO to occur or the next
               timer event. */
            add = qemu_next_deadline();
            /* We advance the timer before checking for IO.
               Limit the amount we advance so that early IO
               activity won't get the guest too far ahead. */
            if (add > 10000000)
                add = 10000000;
            delta += add;
            qemu_icount += qemu_icount_round (add);
            timeout = delta / 1000000;
            if (timeout < 0)
                timeout = 0;
        }
    }

    return timeout;
}