1 #ifndef QEMU_TIMER_H
2 #define QEMU_TIMER_H
3
4 #include "qemu-common.h"
5 #include "qemu/notify.h"
6 #include "qemu/host-utils.h"
7 #include "sysemu/cpus.h"
8
9 #define NANOSECONDS_PER_SECOND 1000000000LL
10
11 /* timers */
12
13 #define SCALE_MS 1000000
14 #define SCALE_US 1000
15 #define SCALE_NS 1
16
17 /**
18 * QEMUClockType:
19 *
20 * The following clock types are available:
21 *
22 * @QEMU_CLOCK_REALTIME: Real time clock
23 *
24 * The real time clock should be used only for stuff which does not
25 * change the virtual machine state, as it runs even if the virtual
26 * machine is stopped.
27 *
28 * @QEMU_CLOCK_VIRTUAL: virtual clock
29 *
30 * The virtual clock only runs during the emulation. It stops
31 * when the virtual machine is stopped.
32 *
33 * @QEMU_CLOCK_HOST: host clock
34 *
35 * The host clock should be used for device models that emulate accurate
36 * real time sources. It will continue to run when the virtual machine
37 * is suspended, and it will reflect system time changes the host may
38 * undergo (e.g. due to NTP).
39 *
40 * @QEMU_CLOCK_VIRTUAL_RT: realtime clock used for icount warp
41 *
42 * Outside icount mode, this clock is the same as @QEMU_CLOCK_VIRTUAL.
43 * In icount mode, this clock counts nanoseconds while the virtual
44 * machine is running. It is used to increase @QEMU_CLOCK_VIRTUAL
45 * while the CPUs are sleeping and thus not executing instructions.
46 */
47
48 typedef enum {
49 QEMU_CLOCK_REALTIME = 0,
50 QEMU_CLOCK_VIRTUAL = 1,
51 QEMU_CLOCK_HOST = 2,
52 QEMU_CLOCK_VIRTUAL_RT = 3,
53 QEMU_CLOCK_MAX
54 } QEMUClockType;
55
56 typedef struct QEMUTimerList QEMUTimerList;
57
58 struct QEMUTimerListGroup {
59 QEMUTimerList *tl[QEMU_CLOCK_MAX];
60 };
61
62 typedef void QEMUTimerCB(void *opaque);
63 typedef void QEMUTimerListNotifyCB(void *opaque);
64
65 struct QEMUTimer {
66 int64_t expire_time; /* in nanoseconds */
67 QEMUTimerList *timer_list;
68 QEMUTimerCB *cb;
69 void *opaque;
70 QEMUTimer *next;
71 int scale;
72 };
73
74 extern QEMUTimerListGroup main_loop_tlg;
75
76 /**
77 * qemu_clock_get_ns:
78 * @type: the clock type
79 *
80 * Get the nanosecond value of a clock with
81 * type @type
82 *
83 * Returns: the clock value in nanoseconds
84 */
85 int64_t qemu_clock_get_ns(QEMUClockType type);
86
87 /**
88 * qemu_clock_get_ms:
89 * @type: the clock type
90 *
91 * Get the millisecond value of a clock with
92 * type @type
93 *
94 * Returns: the clock value in milliseconds
95 */
96 static inline int64_t qemu_clock_get_ms(QEMUClockType type)
97 {
98 return qemu_clock_get_ns(type) / SCALE_MS;
99 }
100
101 /**
102 * qemu_clock_get_us:
103 * @type: the clock type
104 *
105 * Get the microsecond value of a clock with
106 * type @type
107 *
108 * Returns: the clock value in microseconds
109 */
110 static inline int64_t qemu_clock_get_us(QEMUClockType type)
111 {
112 return qemu_clock_get_ns(type) / SCALE_US;
113 }
114
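/*
 * A minimal usage sketch (not part of the upstream header): reading a
 * clock and deriving a future deadline.  The 100 ms offset is purely
 * illustrative.
 *
 *     int64_t now_ns = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 *     int64_t deadline_ns = now_ns + 100 * SCALE_MS;     // 100 ms from now
 *     int64_t now_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
 */
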
115 /**
116 * qemu_clock_has_timers:
117 * @type: the clock type
118 *
119 * Determines whether a clock's default timer list
120 * has timers attached
121 *
122 * Note that this function should not be used when other threads also access
123 * the timer list. The return value may be outdated by the time it is acted
124 * upon.
125 *
126 * Returns: true if the clock's default timer list
127 * has timers attached
128 */
129 bool qemu_clock_has_timers(QEMUClockType type);
130
131 /**
132 * qemu_clock_expired:
133 * @type: the clock type
134 *
135 * Determines whether a clock's default timer list
136 * has an expired timer.
137 *
138 * Returns: true if the clock's default timer list has
139 * an expired timer
140 */
141 bool qemu_clock_expired(QEMUClockType type);
142
143 /**
144 * qemu_clock_use_for_deadline:
145 * @type: the clock type
146 *
147 * Determine whether a clock should be used for deadline
148 * calculations. Some clocks, for instance @QEMU_CLOCK_VIRTUAL with
149 * use_icount set, do not count in nanoseconds. Such clocks
150 * are not used for deadline calculations, and are presumed
151 * to interrupt any poll using qemu_notify/aio_notify
152 * etc.
153 *
154 * Returns: true if the clock runs in nanoseconds and
155 * should be used for a deadline.
156 */
157 bool qemu_clock_use_for_deadline(QEMUClockType type);
158
159 /**
160 * qemu_clock_deadline_ns_all:
161 * @type: the clock type
162 *
163 * Calculate the deadline across all timer lists associated
164 * with a clock (as opposed to just the default one)
165 * in nanoseconds, or -1 if no timer is set to expire.
166 *
167 * Returns: time until expiry in nanoseconds or -1
168 */
169 int64_t qemu_clock_deadline_ns_all(QEMUClockType type);
170
171 /**
172 * qemu_clock_get_main_loop_timerlist:
173 * @type: the clock type
174 *
175 * Return the default timer list associated with a clock.
176 *
177 * Returns: the default timer list
178 */
179 QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type);
180
181 /**
182 * qemu_clock_notify:
183 * @type: the clock type
184 *
185 * Call the notifier callback connected with the default timer
186 * list linked to the clock, or qemu_notify() if none.
187 */
188 void qemu_clock_notify(QEMUClockType type);
189
190 /**
191 * qemu_clock_enable:
192 * @type: the clock type
193 * @enabled: true to enable, false to disable
194 *
195 * Enable or disable a clock.
196 * Disabling the clock will wait for related timerlists to stop
197 * executing qemu_run_timers. Thus, this function should not
198 * be used from the callback of a timer that is based on @type.
199 * Doing so would cause a deadlock.
200 *
201 * Caller should hold BQL.
202 */
203 void qemu_clock_enable(QEMUClockType type, bool enabled);
204
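/*
 * Illustrative sketch (not from the QEMU sources): pausing and resuming
 * the virtual clock, e.g. while the machine is stopped.  Per the note
 * above, the BQL must be held.
 *
 *     qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);   // guest timers frozen
 *     // ... machine is stopped ...
 *     qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);    // timers may fire again
 */
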
205 /**
206 * qemu_start_warp_timer:
207 *
208 * Starts the warp timer used to advance the virtual clock in icount mode
209 */
210 void qemu_start_warp_timer(void);
211
212 /**
213 * qemu_clock_register_reset_notifier:
214 * @type: the clock type
215 * @notifier: the notifier function
216 *
217 * Register a notifier function to call when the clock
218 * concerned is reset.
219 */
220 void qemu_clock_register_reset_notifier(QEMUClockType type,
221 Notifier *notifier);
222
223 /**
224 * qemu_clock_unregister_reset_notifier:
225 * @type: the clock type
226 * @notifier: the notifier function
227 *
228 * Unregister a notifier function previously registered with
229 * qemu_clock_register_reset_notifier() for the clock concerned.
230 */
231 void qemu_clock_unregister_reset_notifier(QEMUClockType type,
232 Notifier *notifier);
233
234 /**
235 * qemu_clock_run_timers:
236 * @type: clock on which to operate
237 *
238 * Run all the timers associated with the default timer list
239 * of a clock.
240 *
241 * Returns: true if any timer ran.
242 */
243 bool qemu_clock_run_timers(QEMUClockType type);
244
245 /**
246 * qemu_clock_run_all_timers:
247 *
248 * Run all the timers associated with the default timer list
249 * of every clock.
250 *
251 * Returns: true if any timer ran.
252 */
253 bool qemu_clock_run_all_timers(void);
254
255 /*
256 * QEMUTimerList
257 */
258
259 /**
260 * timerlist_new:
261 * @type: the clock type to associate with the timerlist
262 * @cb: the callback to call on notification
263 * @opaque: the opaque pointer to pass to the callback
264 *
265 * Create a new timerlist associated with the clock of
266 * type @type.
267 *
268 * Returns: a pointer to the QEMUTimerList created
269 */
270 QEMUTimerList *timerlist_new(QEMUClockType type,
271 QEMUTimerListNotifyCB *cb, void *opaque);
272
273 /**
274 * timerlist_free:
275 * @timer_list: the timer list to free
276 *
277 * Frees a timer_list. It must have no active timers.
278 */
279 void timerlist_free(QEMUTimerList *timer_list);
280
281 /**
282 * timerlist_has_timers:
283 * @timer_list: the timer list to operate on
284 *
285 * Determine whether a timer list has active timers
286 *
287 * Note that this function should not be used when other threads also access
288 * the timer list. The return value may be outdated by the time it is acted
289 * upon.
290 *
291 * Returns: true if the timer list has timers.
292 */
293 bool timerlist_has_timers(QEMUTimerList *timer_list);
294
295 /**
296 * timerlist_expired:
297 * @timer_list: the timer list to operate on
298 *
299 * Determine whether a timer list has any timers which
300 * are expired.
301 *
302 * Returns: true if the timer list has timers which
303 * have expired.
304 */
305 bool timerlist_expired(QEMUTimerList *timer_list);
306
307 /**
308 * timerlist_deadline_ns:
309 * @timer_list: the timer list to operate on
310 *
311 * Determine the deadline for a timer_list, i.e.
312 * the number of nanoseconds until the first timer
313 * expires. Return -1 if there are no timers.
314 *
315 * Returns: the number of nanoseconds until the earliest
316 * timer expires, or -1 if none
317 */
318 int64_t timerlist_deadline_ns(QEMUTimerList *timer_list);
319
320 /**
321 * timerlist_get_clock:
322 * @timer_list: the timer list to operate on
323 *
324 * Determine the clock type associated with a timer list.
325 *
326 * Returns: the clock type associated with the
327 * timer list.
328 */
329 QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list);
330
331 /**
332 * timerlist_run_timers:
333 * @timer_list: the timer list to use
334 *
335 * Call all expired timers associated with the timer list.
336 *
337 * Returns: true if any timer expired
338 */
339 bool timerlist_run_timers(QEMUTimerList *timer_list);
340
341 /**
342 * timerlist_notify:
343 * @timer_list: the timer list to use
344 *
345 * Call the notifier callback associated with the timer list.
346 */
347 void timerlist_notify(QEMUTimerList *timer_list);
348
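/*
 * Illustrative sketch of the QEMUTimerList API (names such as my_notify
 * are hypothetical); this loosely mirrors how a private poll loop would
 * drive its own timer list:
 *
 *     static void my_notify(void *opaque)
 *     {
 *         // wake up whatever poll loop services this timer list
 *     }
 *
 *     QEMUTimerList *tl = timerlist_new(QEMU_CLOCK_VIRTUAL, my_notify, NULL);
 *     int64_t deadline = timerlist_deadline_ns(tl);  // -1 if nothing armed
 *     if (timerlist_expired(tl)) {
 *         timerlist_run_timers(tl);                  // run expired callbacks
 *     }
 *     timerlist_free(tl);                            // only with no active timers
 */
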
349 /*
350 * QEMUTimerListGroup
351 */
352
353 /**
354 * timerlistgroup_init:
355 * @tlg: the timer list group
356 * @cb: the callback to call when a notify is required
357 * @opaque: the opaque pointer to be passed to the callback.
358 *
359 * Initialise a timer list group. This must already be
360 * allocated in memory and zeroed. The notifier callback is
361 * called whenever a clock in the timer list group is
362 * reenabled or whenever a timer associated with any timer
363 * list is modified. If @cb is specified as null, qemu_notify()
364 * is used instead.
365 */
366 void timerlistgroup_init(QEMUTimerListGroup *tlg,
367 QEMUTimerListNotifyCB *cb, void *opaque);
368
369 /**
370 * timerlistgroup_deinit:
371 * @tlg: the timer list group
372 *
373 * Deinitialise a timer list group. This must already be
374 * initialised. Note the memory is not freed.
375 */
376 void timerlistgroup_deinit(QEMUTimerListGroup *tlg);
377
378 /**
379 * timerlistgroup_run_timers:
380 * @tlg: the timer list group
381 *
382 * Run the timers associated with a timer list group.
383 * This will run timers on multiple clocks.
384 *
385 * Returns: true if any timer callback ran
386 */
387 bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg);
388
389 /**
390 * timerlistgroup_deadline_ns:
391 * @tlg: the timer list group
392 *
393 * Determine the deadline of the soonest timer to
394 * expire associated with any timer list linked to
395 * the timer list group. Only clocks suitable for
396 * deadline calculation are included.
397 *
398 * Returns: the deadline in nanoseconds or -1 if no
399 * timers are to expire.
400 */
401 int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg);
402
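/*
 * Illustrative sketch (not from the QEMU sources) of a private timer list
 * group, one list per clock type, in the style of the main loop; my_notify
 * is a hypothetical notifier:
 *
 *     QEMUTimerListGroup tlg = { 0 };                 // must be zeroed, see above
 *     timerlistgroup_init(&tlg, my_notify, NULL);
 *     int64_t deadline = timerlistgroup_deadline_ns(&tlg);
 *     timerlistgroup_run_timers(&tlg);
 *     timerlistgroup_deinit(&tlg);
 */
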
403 /*
404 * QEMUTimer
405 */
406
407 /**
408 * timer_init_tl:
409 * @ts: the timer to be initialised
410 * @timer_list: the timer list to attach the timer to
411 * @scale: the scale value for the timer
412 * @cb: the callback to be called when the timer expires
413 * @opaque: the opaque pointer to be passed to the callback
414 *
415 * Initialise a new timer and associate it with @timer_list.
416 * The caller is responsible for allocating the memory.
417 *
418 * There is no need for an explicit deinit call; simply make
419 * sure the timer is not on any list, using timer_del.
420 */
421 void timer_init_tl(QEMUTimer *ts,
422 QEMUTimerList *timer_list, int scale,
423 QEMUTimerCB *cb, void *opaque);
424
425 /**
426 * timer_init:
427 * @ts: the timer to be initialised
428 * @type: the clock to associate with the timer
429 * @scale: the scale value for the timer
430 * @cb: the callback to call when the timer expires
431 * @opaque: the opaque pointer to pass to the callback
432 *
433 * Initialize a timer with the given scale on the default timer list
434 * associated with the clock.
435 *
436 * There is no need for an explicit deinit call; simply make
437 * sure the timer is not on any list, using timer_del.
438 */
439 static inline void timer_init(QEMUTimer *ts, QEMUClockType type, int scale,
440 QEMUTimerCB *cb, void *opaque)
441 {
442 timer_init_tl(ts, main_loop_tlg.tl[type], scale, cb, opaque);
443 }
444
445 /**
446 * timer_init_ns:
447 * @ts: the timer to be initialised
448 * @type: the clock to associate with the timer
449 * @cb: the callback to call when the timer expires
450 * @opaque: the opaque pointer to pass to the callback
451 *
452 * Initialize a timer with nanosecond scale on the default timer list
453 * associated with the clock.
454 *
455 * There is no need for an explicit deinit call; simply make
456 * sure the timer is not on any list, using timer_del.
457 */
458 static inline void timer_init_ns(QEMUTimer *ts, QEMUClockType type,
459 QEMUTimerCB *cb, void *opaque)
460 {
461 timer_init(ts, type, SCALE_NS, cb, opaque);
462 }
463
464 /**
465 * timer_init_us:
466 * @ts: the timer to be initialised
467 * @type: the clock to associate with the timer
468 * @cb: the callback to call when the timer expires
469 * @opaque: the opaque pointer to pass to the callback
470 *
471 * Initialize a timer with microsecond scale on the default timer list
472 * associated with the clock.
473 *
474 * There is no need for an explicit deinit call; simply make
475 * sure the timer is not on any list, using timer_del.
476 */
477 static inline void timer_init_us(QEMUTimer *ts, QEMUClockType type,
478 QEMUTimerCB *cb, void *opaque)
479 {
480 timer_init(ts, type, SCALE_US, cb, opaque);
481 }
482
483 /**
484 * timer_init_ms:
485 * @ts: the timer to be initialised
486 * @type: the clock to associate with the timer
487 * @cb: the callback to call when the timer expires
488 * @opaque: the opaque pointer to pass to the callback
489 *
490 * Initialize a timer with millisecond scale on the default timer list
491 * associated with the clock.
492 *
493 * There is no need for an explicit deinit call; simply make
494 * sure the timer is not on any list, using timer_del.
495 */
496 static inline void timer_init_ms(QEMUTimer *ts, QEMUClockType type,
497 QEMUTimerCB *cb, void *opaque)
498 {
499 timer_init(ts, type, SCALE_MS, cb, opaque);
500 }
501
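/*
 * Illustrative sketch (hypothetical names): a timer embedded by value in a
 * device state structure and initialised on the default timer list of the
 * virtual clock:
 *
 *     typedef struct MyDeviceState {
 *         QEMUTimer timer;
 *     } MyDeviceState;
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         MyDeviceState *s = opaque;
 *         // handle expiry; re-arm with timer_mod() if periodic
 *     }
 *
 *     // during device init:
 *     timer_init_ns(&s->timer, QEMU_CLOCK_VIRTUAL, my_timer_cb, s);
 */
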
502 /**
503 * timer_new_tl:
504 * @timer_list: the timer list to attach the timer to
505 * @scale: the scale value for the timer
506 * @cb: the callback to be called when the timer expires
507 * @opaque: the opaque pointer to be passed to the callback
508 *
509 * Create a new timer and associate it with @timer_list.
510 * The memory is allocated by the function.
511 *
512 * This is not the preferred interface unless you know you
513 * are going to call timer_free. Use timer_init instead.
514 *
515 * Returns: a pointer to the timer
516 */
517 static inline QEMUTimer *timer_new_tl(QEMUTimerList *timer_list,
518 int scale,
519 QEMUTimerCB *cb,
520 void *opaque)
521 {
522 QEMUTimer *ts = g_malloc0(sizeof(QEMUTimer));
523 timer_init_tl(ts, timer_list, scale, cb, opaque);
524 return ts;
525 }
526
527 /**
528 * timer_new:
529 * @type: the clock type to use
530 * @scale: the scale value for the timer
531 * @cb: the callback to be called when the timer expires
532 * @opaque: the opaque pointer to be passed to the callback
533 *
534 * Create a new timer and associate it with the default
535 * timer list for the clock type @type.
536 *
537 * Returns: a pointer to the timer
538 */
539 static inline QEMUTimer *timer_new(QEMUClockType type, int scale,
540 QEMUTimerCB *cb, void *opaque)
541 {
542 return timer_new_tl(main_loop_tlg.tl[type], scale, cb, opaque);
543 }
544
545 /**
546 * timer_new_ns:
547 * @type: the clock type to associate with the timer
548 * @cb: the callback to call when the timer expires
549 * @opaque: the opaque pointer to pass to the callback
550 *
551 * Create a new timer with nanosecond scale on the default timer list
552 * associated with the clock.
553 *
554 * Returns: a pointer to the newly created timer
555 */
556 static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb,
557 void *opaque)
558 {
559 return timer_new(type, SCALE_NS, cb, opaque);
560 }
561
562 /**
563 * timer_new_us:
564 * @type: the clock type to associate with the timer
565 * @cb: the callback to call when the timer expires
566 * @opaque: the opaque pointer to pass to the callback
567 *
568 * Create a new timer with microsecond scale on the default timer list
569 * associated with the clock.
570 *
571 * Returns: a pointer to the newly created timer
572 */
573 static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb,
574 void *opaque)
575 {
576 return timer_new(type, SCALE_US, cb, opaque);
577 }
578
579 /**
580 * timer_new_ms:
581 * @type: the clock type to associate with the timer
582 * @cb: the callback to call when the timer expires
583 * @opaque: the opaque pointer to pass to the callback
584 *
585 * Create a new timer with millisecond scale on the default timer list
586 * associated with the clock.
587 *
588 * Returns: a pointer to the newly created timer
589 */
590 static inline QEMUTimer *timer_new_ms(QEMUClockType type, QEMUTimerCB *cb,
591 void *opaque)
592 {
593 return timer_new(type, SCALE_MS, cb, opaque);
594 }
595
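/*
 * Illustrative sketch (hypothetical names): the common pattern of a
 * heap-allocated periodic timer on the virtual clock.  The callback
 * re-arms the timer 10 ms in the future:
 *
 *     static void my_tick(void *opaque)
 *     {
 *         MyDeviceState *s = opaque;
 *         // ... emulate the periodic event ...
 *         timer_mod(s->tick_timer,
 *                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
 *     }
 *
 *     s->tick_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, my_tick, s);
 *     timer_mod(s->tick_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
 */
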
596 /**
597 * timer_deinit:
598 * @ts: the timer to be de-initialised
599 *
600 * Deassociate the timer from any timerlist. You should
601 * call timer_del first. After this call, any further
602 * timer_del call cannot cause dangling pointer accesses
603 * even if the previously used timerlist is freed.
604 */
605 void timer_deinit(QEMUTimer *ts);
606
607 /**
608 * timer_free:
609 * @ts: the timer
610 *
611 * Free a timer (it must not be on the active list)
612 */
613 void timer_free(QEMUTimer *ts);
614
615 /**
616 * timer_del:
617 * @ts: the timer
618 *
619 * Delete a timer from the active list.
620 *
621 * This function is thread-safe but the timer and its timer list must not be
622 * freed while this function is running.
623 */
624 void timer_del(QEMUTimer *ts);
625
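/*
 * Illustrative teardown sketch for a heap-allocated timer (hypothetical
 * field name): the timer must be off the active list before it is freed.
 *
 *     timer_del(s->tick_timer);       // remove from the active list
 *     timer_free(s->tick_timer);      // release the memory
 *     s->tick_timer = NULL;
 */
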
626 /**
627 * timer_mod_ns:
628 * @ts: the timer
629 * @expire_time: the expiry time in nanoseconds
630 *
631 * Modify a timer to expire at @expire_time
632 *
633 * This function is thread-safe but the timer and its timer list must not be
634 * freed while this function is running.
635 */
636 void timer_mod_ns(QEMUTimer *ts, int64_t expire_time);
637
638 /**
639 * timer_mod_anticipate_ns:
640 * @ts: the timer
641 * @expire_time: the expiry time in nanoseconds
642 *
643 * Modify a timer to expire at @expire_time or the current time,
644 * whichever comes earlier.
645 *
646 * This function is thread-safe but the timer and its timer list must not be
647 * freed while this function is running.
648 */
649 void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time);
650
651 /**
652 * timer_mod:
653 * @ts: the timer
654 * @expire_time: the expire time in the units associated with the timer
655 *
656 * Modify a timer to expire at @expire_time, taking into
657 * account the scale associated with the timer.
658 *
659 * This function is thread-safe but the timer and its timer list must not be
660 * freed while this function is running.
661 */
662 void timer_mod(QEMUTimer *ts, int64_t expire_time);
663
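/*
 * Note the scale handling: timer_mod() interprets @expire_time in the
 * timer's own scale, while timer_mod_ns() always takes nanoseconds.  For
 * a timer created with timer_new_ms() the two calls below are equivalent
 * up to millisecond rounding (illustrative sketch):
 *
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 50);
 *     timer_mod_ns(t, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 50 * SCALE_MS);
 */
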
664 /**
665 * timer_mod_anticipate:
666 * @ts: the timer
667 * @expire_time: the expiry time in nanoseconds
668 *
669 * Modify a timer to expire at @expire_time or the current time, whichever
670 * comes earlier, taking into account the scale associated with the timer.
671 *
672 * This function is thread-safe but the timer and its timer list must not be
673 * freed while this function is running.
674 */
675 void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time);
676
677 /**
678 * timer_pending:
679 * @ts: the timer
680 *
681 * Determines whether a timer is pending (i.e. is on the
682 * active list of timers, whether or not it has already expired).
683 *
684 * Returns: true if the timer is pending
685 */
686 bool timer_pending(QEMUTimer *ts);
687
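/*
 * Illustrative sketch (hypothetical field name): arm the timer only if it
 * is not already on the active list, e.g. to coalesce repeated requests:
 *
 *     if (!timer_pending(s->tick_timer)) {
 *         timer_mod(s->tick_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
 *     }
 */
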
688 /**
689 * timer_expired:
690 * @timer_head: the timer
691 * @current_time: the current time
692 *
693 * Determines whether a timer has expired.
694 *
695 * Returns: true if the timer has expired
696 */
697 bool timer_expired(QEMUTimer *timer_head, int64_t current_time);
698
699 /**
700 * timer_expire_time_ns:
701 * @ts: the timer
702 *
703 * Determine the expiry time of a timer
704 *
705 * Returns: the expiry time in nanoseconds
706 */
707 uint64_t timer_expire_time_ns(QEMUTimer *ts);
708
709 /**
710 * timer_get:
711 * @f: the file
712 * @ts: the timer
713 *
714 * Read a timer @ts from a file @f
715 */
716 void timer_get(QEMUFile *f, QEMUTimer *ts);
717
718 /**
719 * timer_put:
720 * @f: the file
721 * @ts: the timer
722 */
723 void timer_put(QEMUFile *f, QEMUTimer *ts);
724
725 /*
726 * General utility functions
727 */
728
729 /**
730 * qemu_timeout_ns_to_ms:
731 * @ns: nanosecond timeout value
732 *
733 * Convert a nanosecond timeout value (or -1) to
734 * a millisecond value (or -1), always rounding up.
735 *
736 * Returns: millisecond timeout value
737 */
738 int qemu_timeout_ns_to_ms(int64_t ns);
739
740 /**
741 * qemu_poll_ns:
742 * @fds: Array of file descriptors
743 * @nfds: number of file descriptors
744 * @timeout: timeout in nanoseconds
745 *
746 * Perform a poll like g_poll but with a timeout in nanoseconds.
747 * See g_poll documentation for further details.
748 *
749 * Returns: number of fds ready
750 */
751 int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout);
752
753 /**
754 * qemu_soonest_timeout:
755 * @timeout1: first timeout in nanoseconds (or -1 for infinite)
756 * @timeout2: second timeout in nanoseconds (or -1 for infinite)
757 *
758 * Calculates the soonest of two timeout values. -1 means infinite, which
759 * is later than any other value.
760 *
761 * Returns: soonest timeout value in nanoseconds (or -1 for infinite)
762 */
763 static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
764 {
765 /* we can abuse the fact that -1 (which means infinite) is a maximal
766 * value when cast to unsigned. As this is disgusting, it's kept in
767 * one inline function.
768 */
769 return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2;
770 }
771
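/*
 * Illustrative sketch (not from the QEMU sources) of combining timer
 * deadlines with an externally imposed timeout and polling, in the style
 * of the main loop; @fds/@nfds and the one second cap are hypothetical:
 *
 *     int64_t deadline = timerlistgroup_deadline_ns(&main_loop_tlg);
 *     int64_t timeout = qemu_soonest_timeout(deadline,
 *                                            1 * NANOSECONDS_PER_SECOND);
 *     qemu_poll_ns(fds, nfds, timeout);
 *     qemu_clock_run_all_timers();        // service whatever has expired
 */
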
772 /**
773 * init_clocks:
774 *
775 * Initialise the clock & timer infrastructure
776 */
777 void init_clocks(void);
778
779 int64_t cpu_get_ticks(void);
780 /* Caller must hold BQL */
781 void cpu_enable_ticks(void);
782 /* Caller must hold BQL */
783 void cpu_disable_ticks(void);
784
785 static inline int64_t get_max_clock_jump(void)
786 {
787 /* This should be small enough to prevent excessive interrupts from being
788 * generated by the RTC on clock jumps, but large enough to avoid frequent
789 * unnecessary resets in idle VMs.
790 */
791 return 60 * NANOSECONDS_PER_SECOND;
792 }
793
794 /*
795 * Low level clock functions
796 */
797
798 /* get host real time in nanoseconds */
799 static inline int64_t get_clock_realtime(void)
800 {
801 struct timeval tv;
802
803 gettimeofday(&tv, NULL);
804 return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
805 }
806
807 /* Warning: don't insert tracepoints into these functions, they are
808    also used by the simpletrace backend and tracepoints would cause
809 an infinite recursion! */
810 #ifdef _WIN32
811 extern int64_t clock_freq;
812
813 static inline int64_t get_clock(void)
814 {
815 LARGE_INTEGER ti;
816 QueryPerformanceCounter(&ti);
817 return muldiv64(ti.QuadPart, NANOSECONDS_PER_SECOND, clock_freq);
818 }
819
820 #else
821
822 extern int use_rt_clock;
823
824 static inline int64_t get_clock(void)
825 {
826 #ifdef CLOCK_MONOTONIC
827 if (use_rt_clock) {
828 struct timespec ts;
829 clock_gettime(CLOCK_MONOTONIC, &ts);
830 return ts.tv_sec * 1000000000LL + ts.tv_nsec;
831 } else
832 #endif
833 {
834 /* XXX: using gettimeofday leads to problems if the date
835 changes, so it should be avoided. */
836 return get_clock_realtime();
837 }
838 }
839 #endif
840
841 /* icount */
842 int64_t cpu_get_icount_raw(void);
843 int64_t cpu_get_icount(void);
844 int64_t cpu_get_clock(void);
845 int64_t cpu_icount_to_ns(int64_t icount);
846
847 /*******************************************/
848 /* host CPU ticks (if available) */
849
850 #if defined(_ARCH_PPC)
851
852 static inline int64_t cpu_get_host_ticks(void)
853 {
854 int64_t retval;
855 #ifdef _ARCH_PPC64
856 /* This reads the timebase in one 64-bit go and includes the Cell workaround from:
857 http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
858 */
859 __asm__ __volatile__ ("mftb %0\n\t"
860 "cmpwi %0,0\n\t"
861 "beq- $-8"
862 : "=r" (retval));
863 #else
864 /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
865 unsigned long junk;
866 __asm__ __volatile__ ("mfspr %1,269\n\t" /* mftbu */
867 "mfspr %L0,268\n\t" /* mftb */
868 "mfspr %0,269\n\t" /* mftbu */
869 "cmpw %0,%1\n\t"
870 "bne $-16"
871 : "=r" (retval), "=r" (junk));
872 #endif
873 return retval;
874 }
875
876 #elif defined(__i386__)
877
878 static inline int64_t cpu_get_host_ticks(void)
879 {
880 int64_t val;
881 asm volatile ("rdtsc" : "=A" (val));
882 return val;
883 }
884
885 #elif defined(__x86_64__)
886
887 static inline int64_t cpu_get_host_ticks(void)
888 {
889 uint32_t low,high;
890 int64_t val;
891 asm volatile("rdtsc" : "=a" (low), "=d" (high));
892 val = high;
893 val <<= 32;
894 val |= low;
895 return val;
896 }
897
898 #elif defined(__hppa__)
899
900 static inline int64_t cpu_get_host_ticks(void)
901 {
902 int val;
903 asm volatile ("mfctl %%cr16, %0" : "=r"(val));
904 return val;
905 }
906
907 #elif defined(__ia64)
908
909 static inline int64_t cpu_get_host_ticks(void)
910 {
911 int64_t val;
912 asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
913 return val;
914 }
915
916 #elif defined(__s390__)
917
918 static inline int64_t cpu_get_host_ticks(void)
919 {
920 int64_t val;
921 asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
922 return val;
923 }
924
925 #elif defined(__sparc__)
926
927 static inline int64_t cpu_get_host_ticks (void)
928 {
929 #if defined(_LP64)
930 uint64_t rval;
931 asm volatile("rd %%tick,%0" : "=r"(rval));
932 return rval;
933 #else
934 /* We need an %o or %g register for this. For recent enough gcc
935 there is an "h" constraint for that. Don't bother with that. */
936 union {
937 uint64_t i64;
938 struct {
939 uint32_t high;
940 uint32_t low;
941 } i32;
942 } rval;
943 asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1"
944 : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1");
945 return rval.i64;
946 #endif
947 }
948
949 #elif defined(__mips__) && \
950 ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
951 /*
952 * binutils wants to use rdhwr only on mips32r2,
953 * but as the Linux kernel emulates it, it's fine
954 * to use it.
955 *
956 */
957 #define MIPS_RDHWR(rd, value) { \
958 __asm__ __volatile__ (".set push\n\t" \
959 ".set mips32r2\n\t" \
960 "rdhwr %0, "rd"\n\t" \
961 ".set pop" \
962 : "=r" (value)); \
963 }
964
965 static inline int64_t cpu_get_host_ticks(void)
966 {
967 /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
968 uint32_t count;
969 static uint32_t cyc_per_count = 0;
970
971 if (!cyc_per_count) {
972 MIPS_RDHWR("$3", cyc_per_count);
973 }
974
975 MIPS_RDHWR("$2", count);
976 return (int64_t)(count * cyc_per_count);
977 }
978
979 #elif defined(__alpha__)
980
981 static inline int64_t cpu_get_host_ticks(void)
982 {
983 uint64_t cc;
984 uint32_t cur, ofs;
985
986 asm volatile("rpcc %0" : "=r"(cc));
987 cur = cc;
988 ofs = cc >> 32;
989 return cur - ofs;
990 }
991
992 #else
993 /* The host CPU doesn't have an easily accessible cycle counter.
994 Just return a monotonically increasing value. This will be
995 totally wrong, but hopefully better than nothing. */
996 static inline int64_t cpu_get_host_ticks (void)
997 {
998 static int64_t ticks = 0;
999 return ticks++;
1000 }
1001 #endif
1002
1003 #ifdef CONFIG_PROFILER
1004 static inline int64_t profile_getclock(void)
1005 {
1006 return get_clock();
1007 }
1008
1009 extern int64_t tcg_time;
1010 extern int64_t dev_time;
1011 #endif
1012
1013 #endif