1 #ifndef QEMU_TIMER_H
2 #define QEMU_TIMER_H
3
4 #include "qemu-common.h"
5 #include "qemu/notify.h"
6 #include "qemu/host-utils.h"
7
8 #define NANOSECONDS_PER_SECOND 1000000000LL
9
10 /* timers */
11
12 #define SCALE_MS 1000000
13 #define SCALE_US 1000
14 #define SCALE_NS 1
15
16 /**
17 * QEMUClockType:
18 *
19 * The following clock types are available:
20 *
21 * @QEMU_CLOCK_REALTIME: Real time clock
22 *
23 * The real time clock should be used only for stuff which does not
24 * change the virtual machine state, as it runs even if the virtual
25 * machine is stopped.
26 *
27 * @QEMU_CLOCK_VIRTUAL: virtual clock
28 *
29 * The virtual clock only runs during the emulation. It stops
30 * when the virtual machine is stopped.
31 *
32 * @QEMU_CLOCK_HOST: host clock
33 *
34 * The host clock should be used for device models that emulate accurate
35 * real time sources. It will continue to run when the virtual machine
36 * is suspended, and it will reflect system time changes the host may
37 * undergo (e.g. due to NTP).
38 *
39 * @QEMU_CLOCK_VIRTUAL_RT: realtime clock used for icount warp
40 *
41 * Outside icount mode, this clock is the same as @QEMU_CLOCK_VIRTUAL.
42 * In icount mode, this clock counts nanoseconds while the virtual
43 * machine is running. It is used to increase @QEMU_CLOCK_VIRTUAL
44 * while the CPUs are sleeping and thus not executing instructions.
45 */
46
47 typedef enum {
48 QEMU_CLOCK_REALTIME = 0,
49 QEMU_CLOCK_VIRTUAL = 1,
50 QEMU_CLOCK_HOST = 2,
51 QEMU_CLOCK_VIRTUAL_RT = 3,
52 QEMU_CLOCK_MAX
53 } QEMUClockType;
54
55 typedef struct QEMUTimerList QEMUTimerList;
56
57 struct QEMUTimerListGroup {
58 QEMUTimerList *tl[QEMU_CLOCK_MAX];
59 };
60
61 typedef void QEMUTimerCB(void *opaque);
62 typedef void QEMUTimerListNotifyCB(void *opaque, QEMUClockType type);
63
64 struct QEMUTimer {
65 int64_t expire_time; /* in nanoseconds */
66 QEMUTimerList *timer_list;
67 QEMUTimerCB *cb;
68 void *opaque;
69 QEMUTimer *next;
70 int scale;
71 };
72
73 extern QEMUTimerListGroup main_loop_tlg;
74
   75 /**
   76  * qemu_clock_get_ns:
77 * @type: the clock type
78 *
79 * Get the nanosecond value of a clock with
80 * type @type
81 *
82 * Returns: the clock value in nanoseconds
83 */
84 int64_t qemu_clock_get_ns(QEMUClockType type);
85
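/*
 * Example (illustrative sketch, not part of the QEMU API): reading the
 * virtual clock and computing a deadline one second in the future, using
 * only names declared in this header.
 *
 *     int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 *     int64_t deadline = now + NANOSECONDS_PER_SECOND;
 */
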
86 /**
   87  * qemu_clock_get_ms:
88 * @type: the clock type
89 *
90 * Get the millisecond value of a clock with
91 * type @type
92 *
93 * Returns: the clock value in milliseconds
94 */
95 static inline int64_t qemu_clock_get_ms(QEMUClockType type)
96 {
97 return qemu_clock_get_ns(type) / SCALE_MS;
98 }
99
100 /**
  101  * qemu_clock_get_us:
102 * @type: the clock type
103 *
104 * Get the microsecond value of a clock with
105 * type @type
106 *
107 * Returns: the clock value in microseconds
108 */
109 static inline int64_t qemu_clock_get_us(QEMUClockType type)
110 {
111 return qemu_clock_get_ns(type) / SCALE_US;
112 }
113
114 /**
115 * qemu_clock_has_timers:
116 * @type: the clock type
117 *
118 * Determines whether a clock's default timer list
119 * has timers attached
120 *
121 * Note that this function should not be used when other threads also access
122 * the timer list. The return value may be outdated by the time it is acted
123 * upon.
124 *
125 * Returns: true if the clock's default timer list
126 * has timers attached
127 */
128 bool qemu_clock_has_timers(QEMUClockType type);
129
130 /**
131 * qemu_clock_expired:
132 * @type: the clock type
133 *
134 * Determines whether a clock's default timer list
135 * has an expired timer.
136 *
137 * Returns: true if the clock's default timer list has
138 * an expired timer
139 */
140 bool qemu_clock_expired(QEMUClockType type);
141
142 /**
143 * qemu_clock_use_for_deadline:
144 * @type: the clock type
145 *
146 * Determine whether a clock should be used for deadline
  147  * calculations. Some clocks, for instance QEMU_CLOCK_VIRTUAL with
148 * use_icount set, do not count in nanoseconds. Such clocks
149 * are not used for deadline calculations, and are presumed
150 * to interrupt any poll using qemu_notify/aio_notify
151 * etc.
152 *
153 * Returns: true if the clock runs in nanoseconds and
154 * should be used for a deadline.
155 */
156 bool qemu_clock_use_for_deadline(QEMUClockType type);
157
158 /**
159 * qemu_clock_deadline_ns_all:
160 * @type: the clock type
161 *
162 * Calculate the deadline across all timer lists associated
163 * with a clock (as opposed to just the default one)
164 * in nanoseconds, or -1 if no timer is set to expire.
165 *
166 * Returns: time until expiry in nanoseconds or -1
167 */
168 int64_t qemu_clock_deadline_ns_all(QEMUClockType type);
169
170 /**
171 * qemu_clock_get_main_loop_timerlist:
172 * @type: the clock type
173 *
174 * Return the default timer list associated with a clock.
175 *
176 * Returns: the default timer list
177 */
178 QEMUTimerList *qemu_clock_get_main_loop_timerlist(QEMUClockType type);
179
180 /**
  181  * qemu_clock_notify:
182 * @type: the clock type
183 *
184 * Call the notifier callback connected with the default timer
185 * list linked to the clock, or qemu_notify() if none.
186 */
187 void qemu_clock_notify(QEMUClockType type);
188
189 /**
190 * qemu_clock_enable:
191 * @type: the clock type
192 * @enabled: true to enable, false to disable
193 *
  194  * Enable or disable a clock.
  195  * Disabling the clock will wait for related timerlists to stop
  196  * executing qemu_run_timers. Thus, this function should not
197 * be used from the callback of a timer that is based on @clock.
198 * Doing so would cause a deadlock.
199 *
200 * Caller should hold BQL.
201 */
202 void qemu_clock_enable(QEMUClockType type, bool enabled);
203
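/*
 * Example (sketch): pausing and resuming the virtual clock, e.g. around a
 * VM stop/restart sequence. The caller must hold the BQL, as noted above.
 *
 *     qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
 *     ... do work with the virtual clock stopped ...
 *     qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
 */
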
204 /**
205 * qemu_start_warp_timer:
206 *
  207  * Starts the warp timer used in icount mode to advance the virtual clock.
208 */
209 void qemu_start_warp_timer(void);
210
211 /**
212 * qemu_clock_register_reset_notifier:
213 * @type: the clock type
214 * @notifier: the notifier function
215 *
216 * Register a notifier function to call when the clock
217 * concerned is reset.
218 */
219 void qemu_clock_register_reset_notifier(QEMUClockType type,
220 Notifier *notifier);
221
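/*
 * Example (illustrative sketch): registering a reset notifier on the host
 * clock. The callback and variable names are hypothetical; Notifier comes
 * from "qemu/notify.h".
 *
 *     static void my_clock_reset_cb(Notifier *n, void *data)
 *     {
 *         ... react to the clock being reset, e.g. host time moving backwards ...
 *     }
 *
 *     static Notifier my_clock_reset_notifier = {
 *         .notify = my_clock_reset_cb,
 *     };
 *
 *     qemu_clock_register_reset_notifier(QEMU_CLOCK_HOST,
 *                                        &my_clock_reset_notifier);
 */
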
222 /**
223 * qemu_clock_unregister_reset_notifier:
224 * @type: the clock type
225 * @notifier: the notifier function
226 *
  227  * Unregister a notifier previously registered with
  228  * qemu_clock_register_reset_notifier().
229 */
230 void qemu_clock_unregister_reset_notifier(QEMUClockType type,
231 Notifier *notifier);
232
233 /**
234 * qemu_clock_run_timers:
235 * @type: clock on which to operate
236 *
237 * Run all the timers associated with the default timer list
238 * of a clock.
239 *
240 * Returns: true if any timer ran.
241 */
242 bool qemu_clock_run_timers(QEMUClockType type);
243
244 /**
245 * qemu_clock_run_all_timers:
246 *
247 * Run all the timers associated with the default timer list
248 * of every clock.
249 *
250 * Returns: true if any timer ran.
251 */
252 bool qemu_clock_run_all_timers(void);
253
254 /*
255 * QEMUTimerList
256 */
257
258 /**
259 * timerlist_new:
260 * @type: the clock type to associate with the timerlist
261 * @cb: the callback to call on notification
262 * @opaque: the opaque pointer to pass to the callback
263 *
264 * Create a new timerlist associated with the clock of
265 * type @type.
266 *
267 * Returns: a pointer to the QEMUTimerList created
268 */
269 QEMUTimerList *timerlist_new(QEMUClockType type,
270 QEMUTimerListNotifyCB *cb, void *opaque);
271
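/*
 * Example (illustrative sketch): creating a private timer list on the
 * virtual clock. The notify callback name is hypothetical;
 * qemu_notify_event() is declared in "qemu/main-loop.h".
 *
 *     static void my_timerlist_notify(void *opaque, QEMUClockType type)
 *     {
 *         qemu_notify_event();
 *     }
 *
 *     QEMUTimerList *tl = timerlist_new(QEMU_CLOCK_VIRTUAL,
 *                                       my_timerlist_notify, NULL);
 *     ...
 *     timerlist_free(tl);
 */
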
272 /**
273 * timerlist_free:
274 * @timer_list: the timer list to free
275 *
276 * Frees a timer_list. It must have no active timers.
277 */
278 void timerlist_free(QEMUTimerList *timer_list);
279
280 /**
281 * timerlist_has_timers:
282 * @timer_list: the timer list to operate on
283 *
284 * Determine whether a timer list has active timers
285 *
286 * Note that this function should not be used when other threads also access
287 * the timer list. The return value may be outdated by the time it is acted
288 * upon.
289 *
290 * Returns: true if the timer list has timers.
291 */
292 bool timerlist_has_timers(QEMUTimerList *timer_list);
293
294 /**
295 * timerlist_expired:
296 * @timer_list: the timer list to operate on
297 *
298 * Determine whether a timer list has any timers which
299 * are expired.
300 *
301 * Returns: true if the timer list has timers which
302 * have expired.
303 */
304 bool timerlist_expired(QEMUTimerList *timer_list);
305
306 /**
307 * timerlist_deadline_ns:
308 * @timer_list: the timer list to operate on
309 *
310 * Determine the deadline for a timer_list, i.e.
311 * the number of nanoseconds until the first timer
312 * expires. Return -1 if there are no timers.
313 *
314 * Returns: the number of nanoseconds until the earliest
  315  * timer expires, or -1 if none
316 */
317 int64_t timerlist_deadline_ns(QEMUTimerList *timer_list);
318
319 /**
320 * timerlist_get_clock:
321 * @timer_list: the timer list to operate on
322 *
323 * Determine the clock type associated with a timer list.
324 *
325 * Returns: the clock type associated with the
326 * timer list.
327 */
328 QEMUClockType timerlist_get_clock(QEMUTimerList *timer_list);
329
330 /**
331 * timerlist_run_timers:
332 * @timer_list: the timer list to use
333 *
334 * Call all expired timers associated with the timer list.
335 *
336 * Returns: true if any timer expired
337 */
338 bool timerlist_run_timers(QEMUTimerList *timer_list);
339
340 /**
341 * timerlist_notify:
342 * @timer_list: the timer list to use
343 *
  344  * Call the notifier callback associated with the timer list.
345 */
346 void timerlist_notify(QEMUTimerList *timer_list);
347
348 /*
349 * QEMUTimerListGroup
350 */
351
352 /**
353 * timerlistgroup_init:
354 * @tlg: the timer list group
355 * @cb: the callback to call when a notify is required
356 * @opaque: the opaque pointer to be passed to the callback.
357 *
358 * Initialise a timer list group. This must already be
359 * allocated in memory and zeroed. The notifier callback is
360 * called whenever a clock in the timer list group is
361 * reenabled or whenever a timer associated with any timer
362 * list is modified. If @cb is specified as null, qemu_notify()
363 * is used instead.
364 */
365 void timerlistgroup_init(QEMUTimerListGroup *tlg,
366 QEMUTimerListNotifyCB *cb, void *opaque);
367
368 /**
369 * timerlistgroup_deinit:
370 * @tlg: the timer list group
371 *
372 * Deinitialise a timer list group. This must already be
373 * initialised. Note the memory is not freed.
374 */
375 void timerlistgroup_deinit(QEMUTimerListGroup *tlg);
376
377 /**
378 * timerlistgroup_run_timers:
379 * @tlg: the timer list group
380 *
381 * Run the timers associated with a timer list group.
382 * This will run timers on multiple clocks.
383 *
384 * Returns: true if any timer callback ran
385 */
386 bool timerlistgroup_run_timers(QEMUTimerListGroup *tlg);
387
388 /**
389 * timerlistgroup_deadline_ns:
390 * @tlg: the timer list group
391 *
392 * Determine the deadline of the soonest timer to
393 * expire associated with any timer list linked to
394 * the timer list group. Only clocks suitable for
395 * deadline calculation are included.
396 *
397 * Returns: the deadline in nanoseconds or -1 if no
398 * timers are to expire.
399 */
400 int64_t timerlistgroup_deadline_ns(QEMUTimerListGroup *tlg);
401
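/*
 * Example (sketch): using the group deadline to bound a poll, roughly as
 * the main loop does. fds, nfds and io_timeout_ns are hypothetical;
 * qemu_soonest_timeout() and qemu_poll_ns() are declared later in this
 * header.
 *
 *     int64_t deadline = timerlistgroup_deadline_ns(&main_loop_tlg);
 *     int64_t timeout = qemu_soonest_timeout(deadline, io_timeout_ns);
 *     qemu_poll_ns(fds, nfds, timeout);
 */
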
402 /*
403 * QEMUTimer
404 */
405
406 /**
407 * timer_init_tl:
408 * @ts: the timer to be initialised
409 * @timer_list: the timer list to attach the timer to
410 * @scale: the scale value for the timer
411 * @cb: the callback to be called when the timer expires
412 * @opaque: the opaque pointer to be passed to the callback
413 *
414 * Initialise a new timer and associate it with @timer_list.
415 * The caller is responsible for allocating the memory.
416 *
  417  * You need not deinitialise the timer explicitly. Simply make
  418  * sure it is not on a list by calling timer_del().
419 */
420 void timer_init_tl(QEMUTimer *ts,
421 QEMUTimerList *timer_list, int scale,
422 QEMUTimerCB *cb, void *opaque);
423
424 /**
425 * timer_init:
426 * @ts: the timer to be initialised
427 * @type: the clock to associate with the timer
428 * @scale: the scale value for the timer
429 * @cb: the callback to call when the timer expires
430 * @opaque: the opaque pointer to pass to the callback
431 *
432 * Initialize a timer with the given scale on the default timer list
433 * associated with the clock.
434 *
  435  * You need not deinitialise the timer explicitly. Simply make
  436  * sure it is not on a list by calling timer_del().
437 */
438 static inline void timer_init(QEMUTimer *ts, QEMUClockType type, int scale,
439 QEMUTimerCB *cb, void *opaque)
440 {
441 timer_init_tl(ts, main_loop_tlg.tl[type], scale, cb, opaque);
442 }
443
444 /**
445 * timer_init_ns:
446 * @ts: the timer to be initialised
447 * @type: the clock to associate with the timer
448 * @cb: the callback to call when the timer expires
449 * @opaque: the opaque pointer to pass to the callback
450 *
451 * Initialize a timer with nanosecond scale on the default timer list
452 * associated with the clock.
453 *
  454  * You need not deinitialise the timer explicitly. Simply make
  455  * sure it is not on a list by calling timer_del().
456 */
457 static inline void timer_init_ns(QEMUTimer *ts, QEMUClockType type,
458 QEMUTimerCB *cb, void *opaque)
459 {
460 timer_init(ts, type, SCALE_NS, cb, opaque);
461 }
462
463 /**
464 * timer_init_us:
465 * @ts: the timer to be initialised
466 * @type: the clock to associate with the timer
467 * @cb: the callback to call when the timer expires
468 * @opaque: the opaque pointer to pass to the callback
469 *
470 * Initialize a timer with microsecond scale on the default timer list
471 * associated with the clock.
472 *
  473  * You need not deinitialise the timer explicitly. Simply make
  474  * sure it is not on a list by calling timer_del().
475 */
476 static inline void timer_init_us(QEMUTimer *ts, QEMUClockType type,
477 QEMUTimerCB *cb, void *opaque)
478 {
479 timer_init(ts, type, SCALE_US, cb, opaque);
480 }
481
482 /**
483 * timer_init_ms:
484 * @ts: the timer to be initialised
485 * @type: the clock to associate with the timer
486 * @cb: the callback to call when the timer expires
487 * @opaque: the opaque pointer to pass to the callback
488 *
489 * Initialize a timer with millisecond scale on the default timer list
490 * associated with the clock.
491 *
  492  * You need not deinitialise the timer explicitly. Simply make
  493  * sure it is not on a list by calling timer_del().
494 */
495 static inline void timer_init_ms(QEMUTimer *ts, QEMUClockType type,
496 QEMUTimerCB *cb, void *opaque)
497 {
498 timer_init(ts, type, SCALE_MS, cb, opaque);
499 }
500
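/*
 * Example (illustrative sketch): a timer embedded in a device state
 * structure. MyDeviceState, my_timer_cb and s are hypothetical names.
 * Because the timer is initialised with millisecond scale, timer_mod()
 * takes a deadline in milliseconds.
 *
 *     typedef struct MyDeviceState {
 *         QEMUTimer timer;
 *     } MyDeviceState;
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         MyDeviceState *s = opaque;
 *         ... handle the tick, then re-arm 10 ms later ...
 *         timer_mod(&s->timer,
 *                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
 *     }
 *
 *     ... during device initialisation ...
 *     timer_init_ms(&s->timer, QEMU_CLOCK_VIRTUAL, my_timer_cb, s);
 *     timer_mod(&s->timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
 */
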
501 /**
502 * timer_new_tl:
503 * @timer_list: the timer list to attach the timer to
504 * @scale: the scale value for the timer
505 * @cb: the callback to be called when the timer expires
506 * @opaque: the opaque pointer to be passed to the callback
507 *
508 * Create a new timer and associate it with @timer_list.
509 * The memory is allocated by the function.
510 *
511 * This is not the preferred interface unless you know you
512 * are going to call timer_free. Use timer_init instead.
513 *
514 * Returns: a pointer to the timer
515 */
516 static inline QEMUTimer *timer_new_tl(QEMUTimerList *timer_list,
517 int scale,
518 QEMUTimerCB *cb,
519 void *opaque)
520 {
521 QEMUTimer *ts = g_malloc0(sizeof(QEMUTimer));
522 timer_init_tl(ts, timer_list, scale, cb, opaque);
523 return ts;
524 }
525
526 /**
527 * timer_new:
528 * @type: the clock type to use
529 * @scale: the scale value for the timer
530 * @cb: the callback to be called when the timer expires
531 * @opaque: the opaque pointer to be passed to the callback
532 *
533 * Create a new timer and associate it with the default
534 * timer list for the clock type @type.
535 *
536 * The default timer list has one special feature: in icount mode,
537 * %QEMU_CLOCK_VIRTUAL timers are run in the vCPU thread. This is
538 * not true of other timer lists, which are typically associated
539 * with an AioContext---each of them runs its timer callbacks in its own
540 * AioContext thread.
541 *
542 * Returns: a pointer to the timer
543 */
544 static inline QEMUTimer *timer_new(QEMUClockType type, int scale,
545 QEMUTimerCB *cb, void *opaque)
546 {
547 return timer_new_tl(main_loop_tlg.tl[type], scale, cb, opaque);
548 }
549
550 /**
551 * timer_new_ns:
552 * @type: the clock type to associate with the timer
553 * @cb: the callback to call when the timer expires
554 * @opaque: the opaque pointer to pass to the callback
555 *
556 * Create a new timer with nanosecond scale on the default timer list
557 * associated with the clock.
558 *
559 * The default timer list has one special feature: in icount mode,
560 * %QEMU_CLOCK_VIRTUAL timers are run in the vCPU thread. This is
561 * not true of other timer lists, which are typically associated
562 * with an AioContext---each of them runs its timer callbacks in its own
563 * AioContext thread.
564 *
565 * Returns: a pointer to the newly created timer
566 */
567 static inline QEMUTimer *timer_new_ns(QEMUClockType type, QEMUTimerCB *cb,
568 void *opaque)
569 {
570 return timer_new(type, SCALE_NS, cb, opaque);
571 }
572
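/*
 * Example (sketch): the common dynamically-allocated pattern. s, s->timer
 * and my_cb are hypothetical; timer_mod(), timer_del() and timer_free()
 * are declared further down in this header.
 *
 *     s->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, my_cb, s);
 *     timer_mod(s->timer,
 *               qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + NANOSECONDS_PER_SECOND);
 *     ...
 *     timer_del(s->timer);
 *     timer_free(s->timer);
 */
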
573 /**
574 * timer_new_us:
575 * @type: the clock type to associate with the timer
576 * @cb: the callback to call when the timer expires
577 * @opaque: the opaque pointer to pass to the callback
578 *
  579  * Create a new timer with microsecond scale on the default timer list
  580  * associated with the clock.
  581  *
  582  * The default timer list has one special feature: in icount mode,
  583  * %QEMU_CLOCK_VIRTUAL timers are run in the vCPU thread. This is
  584  * not true of other timer lists, which are typically associated
  585  * with an AioContext---each of them runs its timer callbacks in its own
  586  * AioContext thread.
587 *
588 * Returns: a pointer to the newly created timer
589 */
590 static inline QEMUTimer *timer_new_us(QEMUClockType type, QEMUTimerCB *cb,
591 void *opaque)
592 {
593 return timer_new(type, SCALE_US, cb, opaque);
594 }
595
596 /**
597 * timer_new_ms:
598 * @type: the clock type to associate with the timer
599 * @cb: the callback to call when the timer expires
600 * @opaque: the opaque pointer to pass to the callback
601 *
  602  * Create a new timer with millisecond scale on the default timer list
  603  * associated with the clock.
  604  *
  605  * The default timer list has one special feature: in icount mode,
  606  * %QEMU_CLOCK_VIRTUAL timers are run in the vCPU thread. This is
  607  * not true of other timer lists, which are typically associated
  608  * with an AioContext---each of them runs its timer callbacks in its own
  609  * AioContext thread.
610 *
611 * Returns: a pointer to the newly created timer
612 */
613 static inline QEMUTimer *timer_new_ms(QEMUClockType type, QEMUTimerCB *cb,
614 void *opaque)
615 {
616 return timer_new(type, SCALE_MS, cb, opaque);
617 }
618
619 /**
620 * timer_deinit:
621 * @ts: the timer to be de-initialised
622 *
623 * Deassociate the timer from any timerlist. You should
  624  * call timer_del() first. After this call, any further
625 * timer_del call cannot cause dangling pointer accesses
626 * even if the previously used timerlist is freed.
627 */
628 void timer_deinit(QEMUTimer *ts);
629
630 /**
631 * timer_free:
632 * @ts: the timer
633 *
634 * Free a timer (it must not be on the active list)
635 */
636 static inline void timer_free(QEMUTimer *ts)
637 {
638 g_free(ts);
639 }
640
641 /**
642 * timer_del:
643 * @ts: the timer
644 *
645 * Delete a timer from the active list.
646 *
647 * This function is thread-safe but the timer and its timer list must not be
648 * freed while this function is running.
649 */
650 void timer_del(QEMUTimer *ts);
651
652 /**
653 * timer_mod_ns:
654 * @ts: the timer
655 * @expire_time: the expiry time in nanoseconds
656 *
657 * Modify a timer to expire at @expire_time
658 *
659 * This function is thread-safe but the timer and its timer list must not be
660 * freed while this function is running.
661 */
662 void timer_mod_ns(QEMUTimer *ts, int64_t expire_time);
663
664 /**
665 * timer_mod_anticipate_ns:
666 * @ts: the timer
667 * @expire_time: the expiry time in nanoseconds
668 *
  669  * Modify a timer to expire at @expire_time or its current expiry
  670  * time, whichever comes earlier.
671 *
672 * This function is thread-safe but the timer and its timer list must not be
673 * freed while this function is running.
674 */
675 void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time);
676
677 /**
678 * timer_mod:
679 * @ts: the timer
680 * @expire_time: the expire time in the units associated with the timer
681 *
  682  * Modify a timer to expire at @expire_time, taking into
683 * account the scale associated with the timer.
684 *
685 * This function is thread-safe but the timer and its timer list must not be
686 * freed while this function is running.
687 */
  688 void timer_mod(QEMUTimer *ts, int64_t expire_time);
689
690 /**
691 * timer_mod_anticipate:
692 * @ts: the timer
  693  * @expire_time: the expiry time, in the units associated with the timer
694 *
  695  * Modify a timer to expire at @expire_time or its current expiry time,
  696  * whichever comes earlier, taking into account the scale associated with the timer.
697 *
698 * This function is thread-safe but the timer and its timer list must not be
699 * freed while this function is running.
700 */
701 void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time);
702
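/*
 * Example (sketch): timer_mod() always (re)sets the deadline, while
 * timer_mod_anticipate() only ever moves it earlier, which is useful when
 * several events may each request earlier service. s->timer and
 * new_deadline are hypothetical.
 *
 *     timer_mod(&s->timer, new_deadline);             (unconditional)
 *     timer_mod_anticipate(&s->timer, new_deadline);  (only if earlier)
 */
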
703 /**
704 * timer_pending:
705 * @ts: the timer
706 *
707 * Determines whether a timer is pending (i.e. is on the
  708  * active list of timers, regardless of whether it has expired).
709 *
710 * Returns: true if the timer is pending
711 */
712 bool timer_pending(QEMUTimer *ts);
713
714 /**
715 * timer_expired:
  716  * @timer_head: the timer
717 * @current_time: the current time
718 *
719 * Determines whether a timer has expired.
720 *
721 * Returns: true if the timer has expired
722 */
723 bool timer_expired(QEMUTimer *timer_head, int64_t current_time);
724
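/*
 * Example (sketch): polling a timer's state by hand against the current
 * virtual-clock value; s->timer is hypothetical.
 *
 *     if (timer_expired(&s->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL))) {
 *         ... the deadline has passed ...
 *     }
 */
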
725 /**
726 * timer_expire_time_ns:
727 * @ts: the timer
728 *
729 * Determine the expiry time of a timer
730 *
731 * Returns: the expiry time in nanoseconds
732 */
733 uint64_t timer_expire_time_ns(QEMUTimer *ts);
734
735 /**
736 * timer_get:
737 * @f: the file
738 * @ts: the timer
739 *
740 * Read a timer @ts from a file @f
741 */
742 void timer_get(QEMUFile *f, QEMUTimer *ts);
743
744 /**
745 * timer_put:
746 * @f: the file
  747  * @ts: the timer
 *
 * Write a timer @ts to a file @f.
748 */
749 void timer_put(QEMUFile *f, QEMUTimer *ts);
750
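/*
 * Example (sketch): saving and restoring a timer's expiry time in legacy
 * save/load handlers; s->timer is hypothetical, and most devices describe
 * their timers through vmstate instead of calling these directly.
 *
 *     ... in the save handler ...
 *     timer_put(f, &s->timer);
 *
 *     ... in the matching load handler ...
 *     timer_get(f, &s->timer);
 */
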
751 /*
752 * General utility functions
753 */
754
755 /**
756 * qemu_timeout_ns_to_ms:
757 * @ns: nanosecond timeout value
758 *
759 * Convert a nanosecond timeout value (or -1) to
760 * a millisecond value (or -1), always rounding up.
761 *
762 * Returns: millisecond timeout value
763 */
764 int qemu_timeout_ns_to_ms(int64_t ns);
765
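/*
 * Example (sketch): converting a nanosecond deadline into a millisecond
 * timeout for g_poll(); fds and nfds are hypothetical.
 *
 *     int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME);
 *     g_poll(fds, nfds, qemu_timeout_ns_to_ms(deadline));
 */
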
766 /**
767 * qemu_poll_ns:
768 * @fds: Array of file descriptors
769 * @nfds: number of file descriptors
770 * @timeout: timeout in nanoseconds
771 *
772 * Perform a poll like g_poll but with a timeout in nanoseconds.
773 * See g_poll documentation for further details.
774 *
775 * Returns: number of fds ready
776 */
777 int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout);
778
779 /**
780 * qemu_soonest_timeout:
781 * @timeout1: first timeout in nanoseconds (or -1 for infinite)
782 * @timeout2: second timeout in nanoseconds (or -1 for infinite)
783 *
784 * Calculates the soonest of two timeout values. -1 means infinite, which
785 * is later than any other value.
786 *
787 * Returns: soonest timeout value in nanoseconds (or -1 for infinite)
788 */
789 static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
790 {
791 /* we can abuse the fact that -1 (which means infinite) is a maximal
792 * value when cast to unsigned. As this is disgusting, it's kept in
793 * one inline function.
794 */
795 return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2;
796 }
797
798 /**
  799  * init_clocks:
800 *
801 * Initialise the clock & timer infrastructure
802 */
803 void init_clocks(QEMUTimerListNotifyCB *notify_cb);
804
805 int64_t cpu_get_ticks(void);
806 /* Caller must hold BQL */
807 void cpu_enable_ticks(void);
808 /* Caller must hold BQL */
809 void cpu_disable_ticks(void);
810
811 static inline int64_t get_max_clock_jump(void)
812 {
813 /* This should be small enough to prevent excessive interrupts from being
814 * generated by the RTC on clock jumps, but large enough to avoid frequent
815 * unnecessary resets in idle VMs.
816 */
817 return 60 * NANOSECONDS_PER_SECOND;
818 }
819
820 /*
821 * Low level clock functions
822 */
823
  824 /* get host real time in nanoseconds */
825 static inline int64_t get_clock_realtime(void)
826 {
827 struct timeval tv;
828
829 gettimeofday(&tv, NULL);
830 return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
831 }
832
833 /* Warning: don't insert tracepoints into these functions, they are
  834    also used by the simpletrace backend, and tracepoints would cause
835 an infinite recursion! */
836 #ifdef _WIN32
837 extern int64_t clock_freq;
838
839 static inline int64_t get_clock(void)
840 {
841 LARGE_INTEGER ti;
842 QueryPerformanceCounter(&ti);
843 return muldiv64(ti.QuadPart, NANOSECONDS_PER_SECOND, clock_freq);
844 }
845
846 #else
847
848 extern int use_rt_clock;
849
850 static inline int64_t get_clock(void)
851 {
852 #ifdef CLOCK_MONOTONIC
853 if (use_rt_clock) {
854 struct timespec ts;
855 clock_gettime(CLOCK_MONOTONIC, &ts);
856 return ts.tv_sec * 1000000000LL + ts.tv_nsec;
857 } else
858 #endif
859 {
860 /* XXX: using gettimeofday leads to problems if the date
861 changes, so it should be avoided. */
862 return get_clock_realtime();
863 }
864 }
865 #endif
866
867 /* icount */
868 int64_t cpu_get_icount_raw(void);
869 int64_t cpu_get_icount(void);
870 int64_t cpu_get_clock(void);
871 int64_t cpu_icount_to_ns(int64_t icount);
872 void cpu_update_icount(CPUState *cpu);
873
874 /*******************************************/
875 /* host CPU ticks (if available) */
876
877 #if defined(_ARCH_PPC)
878
879 static inline int64_t cpu_get_host_ticks(void)
880 {
881 int64_t retval;
882 #ifdef _ARCH_PPC64
883 /* This reads timebase in one 64bit go and includes Cell workaround from:
884 http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
885 */
886 __asm__ __volatile__ ("mftb %0\n\t"
887 "cmpwi %0,0\n\t"
888 "beq- $-8"
889 : "=r" (retval));
890 #else
891 /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
892 unsigned long junk;
893 __asm__ __volatile__ ("mfspr %1,269\n\t" /* mftbu */
894 "mfspr %L0,268\n\t" /* mftb */
895 "mfspr %0,269\n\t" /* mftbu */
896 "cmpw %0,%1\n\t"
897 "bne $-16"
898 : "=r" (retval), "=r" (junk));
899 #endif
900 return retval;
901 }
902
903 #elif defined(__i386__)
904
905 static inline int64_t cpu_get_host_ticks(void)
906 {
907 int64_t val;
908 asm volatile ("rdtsc" : "=A" (val));
909 return val;
910 }
911
912 #elif defined(__x86_64__)
913
914 static inline int64_t cpu_get_host_ticks(void)
915 {
916 uint32_t low,high;
917 int64_t val;
918 asm volatile("rdtsc" : "=a" (low), "=d" (high));
919 val = high;
920 val <<= 32;
921 val |= low;
922 return val;
923 }
924
925 #elif defined(__hppa__)
926
927 static inline int64_t cpu_get_host_ticks(void)
928 {
929 int val;
930 asm volatile ("mfctl %%cr16, %0" : "=r"(val));
931 return val;
932 }
933
934 #elif defined(__ia64)
935
936 static inline int64_t cpu_get_host_ticks(void)
937 {
938 int64_t val;
939 asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
940 return val;
941 }
942
943 #elif defined(__s390__)
944
945 static inline int64_t cpu_get_host_ticks(void)
946 {
947 int64_t val;
948 asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
949 return val;
950 }
951
952 #elif defined(__sparc__)
953
954 static inline int64_t cpu_get_host_ticks (void)
955 {
956 #if defined(_LP64)
957 uint64_t rval;
958 asm volatile("rd %%tick,%0" : "=r"(rval));
959 return rval;
960 #else
961 /* We need an %o or %g register for this. For recent enough gcc
962 there is an "h" constraint for that. Don't bother with that. */
963 union {
964 uint64_t i64;
965 struct {
966 uint32_t high;
967 uint32_t low;
968 } i32;
969 } rval;
970 asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1"
971 : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1");
972 return rval.i64;
973 #endif
974 }
975
976 #elif defined(__mips__) && \
977 ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
978 /*
  979  * binutils wants to use rdhwr only on mips32r2,
  980  * but as the Linux kernel emulates it, it's fine
  981  * to use it.
  982  */
984 #define MIPS_RDHWR(rd, value) { \
985 __asm__ __volatile__ (".set push\n\t" \
986 ".set mips32r2\n\t" \
987 "rdhwr %0, "rd"\n\t" \
988 ".set pop" \
989 : "=r" (value)); \
990 }
991
992 static inline int64_t cpu_get_host_ticks(void)
993 {
994 /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
995 uint32_t count;
996 static uint32_t cyc_per_count = 0;
997
998 if (!cyc_per_count) {
999 MIPS_RDHWR("$3", cyc_per_count);
1000 }
1001
1002 MIPS_RDHWR("$2", count);
1003 return (int64_t)(count * cyc_per_count);
1004 }
1005
1006 #elif defined(__alpha__)
1007
1008 static inline int64_t cpu_get_host_ticks(void)
1009 {
1010 uint64_t cc;
1011 uint32_t cur, ofs;
1012
1013 asm volatile("rpcc %0" : "=r"(cc));
1014 cur = cc;
1015 ofs = cc >> 32;
1016 return cur - ofs;
1017 }
1018
1019 #else
1020 /* The host CPU doesn't have an easily accessible cycle counter.
1021 Just return a monotonically increasing value. This will be
1022 totally wrong, but hopefully better than nothing. */
1023 static inline int64_t cpu_get_host_ticks(void)
1024 {
1025 return get_clock();
1026 }
1027 #endif
1028
1029 #ifdef CONFIG_PROFILER
1030 static inline int64_t profile_getclock(void)
1031 {
1032 return get_clock();
1033 }
1034
1035 extern int64_t tcg_time;
1036 extern int64_t dev_time;
1037 #endif
1038
1039 #endif