/*
 * include/qemu/timer.h -- QEMU clock and timer API.
 */
1#ifndef QEMU_TIMER_H
2#define QEMU_TIMER_H
3
29e922b6 4#include "qemu-common.h"
1de7afc9
PB
5#include "qemu/main-loop.h"
6#include "qemu/notify.h"
29e922b6 7
87ecb68b
PB
8/* timers */
9
0ce1b948
PB
10#define SCALE_MS 1000000
11#define SCALE_US 1000
12#define SCALE_NS 1
13
58ac56b9
AB
14#define QEMU_CLOCK_REALTIME 0
15#define QEMU_CLOCK_VIRTUAL 1
16#define QEMU_CLOCK_HOST 2
17
87ecb68b
PB
18typedef struct QEMUClock QEMUClock;
19typedef void QEMUTimerCB(void *opaque);
20
21/* The real time clock should be used only for stuff which does not
22 change the virtual machine state, as it is run even if the virtual
23 machine is stopped. The real time clock has a frequency of 1000
24 Hz. */
25extern QEMUClock *rt_clock;
26
27/* The virtual clock is only run during the emulation. It is stopped
28 when the virtual machine is stopped. Virtual timers use a high
29 precision clock, usually cpu cycles (use ticks_per_sec). */
30extern QEMUClock *vm_clock;
31
21d5d12b
JK
32/* The host clock should be use for device models that emulate accurate
33 real time sources. It will continue to run when the virtual machine
34 is suspended, and it will reflect system time changes the host may
35 undergo (e.g. due to NTP). The host clock has the same precision as
36 the virtual clock. */
37extern QEMUClock *host_clock;
38
41c872b6 39int64_t qemu_get_clock_ns(QEMUClock *clock);
dc2dfcf0
PB
40int64_t qemu_clock_has_timers(QEMUClock *clock);
41int64_t qemu_clock_expired(QEMUClock *clock);
42int64_t qemu_clock_deadline(QEMUClock *clock);
02a03a9f
AB
43
44/**
45 * qemu_clock_deadline_ns:
46 * @clock: the clock to operate on
47 *
48 * Calculate the timeout of the earliest expiring timer
49 * in nanoseconds, or -1 if no timer is set to expire.
50 *
51 * Returns: time until expiry in nanoseconds or -1
52 */
53int64_t qemu_clock_deadline_ns(QEMUClock *clock);
54
55/**
56 * qemu_timeout_ns_to_ms:
57 * @ns: nanosecond timeout value
58 *
59 * Convert a nanosecond timeout value (or -1) to
60 * a millisecond value (or -1), always rounding up.
61 *
62 * Returns: millisecond timeout value
63 */
64int qemu_timeout_ns_to_ms(int64_t ns);
65
4e0c6529
AB
66/**
67 * qemu_poll_ns:
68 * @fds: Array of file descriptors
69 * @nfds: number of file descriptors
70 * @timeout: timeout in nanoseconds
71 *
72 * Perform a poll like g_poll but with a timeout in nanoseconds.
73 * See g_poll documentation for further details.
74 *
75 * Returns: number of fds ready
76 */
77int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout);
5e1ec7b2 78void qemu_clock_enable(QEMUClock *clock, bool enabled);
ab33fcda 79void qemu_clock_warp(QEMUClock *clock);
87ecb68b 80
691a0c9c
JK
81void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier);
82void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
83 Notifier *notifier);
84
4a998740
PB
85QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
86 QEMUTimerCB *cb, void *opaque);
87ecb68b
PB
87void qemu_free_timer(QEMUTimer *ts);
88void qemu_del_timer(QEMUTimer *ts);
2ff68d07 89void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
87ecb68b 90void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
e93379b0
AB
91bool timer_pending(QEMUTimer *ts);
92bool timer_expired(QEMUTimer *timer_head, int64_t current_time);
93uint64_t timer_expire_time_ns(QEMUTimer *ts);
87ecb68b 94
f9a976b7
AB
95/**
96 * qemu_run_timers:
97 * @clock: clock on which to operate
98 *
99 * Run all the timers associated with a clock.
100 *
101 * Returns: true if any timer ran.
102 */
103bool qemu_run_timers(QEMUClock *clock);
104
105/**
106 * qemu_run_all_timers:
107 *
108 * Run all the timers associated with every clock.
109 *
110 * Returns: true if any timer ran.
111 */
112bool qemu_run_all_timers(void);
113
db1a4972 114void configure_alarms(char const *opt);
db1a4972
PB
115void init_clocks(void);
116int init_timer_alarm(void);
db1a4972 117
70c3b557
BS
118int64_t cpu_get_ticks(void);
119void cpu_enable_ticks(void);
120void cpu_disable_ticks(void);
121
/**
 * qemu_soonest_timeout:
 * @timeout1: first timeout in nanoseconds (or -1 for infinite)
 * @timeout2: second timeout in nanoseconds (or -1 for infinite)
 *
 * Calculates the soonest of two timeout values. -1 means infinite, which
 * is later than any other value.
 *
 * Returns: soonest timeout value in nanoseconds (or -1 for infinite)
 */
static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
{
    /* we can abuse the fact that -1 (which means infinite) is a maximal
     * value when cast to unsigned. As this is disgusting, it's kept in
     * one inline function.
     */
    return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2;
}
140
0ce1b948
PB
141static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
142 void *opaque)
143{
4a998740 144 return qemu_new_timer(clock, SCALE_NS, cb, opaque);
0ce1b948
PB
145}
146
147static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
148 void *opaque)
149{
4a998740 150 return qemu_new_timer(clock, SCALE_MS, cb, opaque);
0ce1b948
PB
151}
152
153static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
154{
155 return qemu_get_clock_ns(clock) / SCALE_MS;
156}

/* Tick frequency of the virtual clock: fixed at 1 GHz, i.e. one tick
 * per nanosecond. */
static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}

/* real time host monotonic timer */
/* NOTE(review): despite the name/comment this is wall-clock time from
 * gettimeofday(), converted to nanoseconds -- it can jump if the host
 * date changes (see the warning in get_clock() below). */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

/* Warning: don't insert tracepoints into these functions, they are
   also used by simpletrace backend and tracepoints would cause
   an infinite recursion! */
#ifdef _WIN32
extern int64_t clock_freq;

/* Monotonic host clock in nanoseconds, from QueryPerformanceCounter
 * scaled by the frequency stored in clock_freq (defined elsewhere). */
static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

extern int use_rt_clock;

/* Monotonic host clock in nanoseconds: CLOCK_MONOTONIC when available
 * and enabled via use_rt_clock, otherwise wall-clock time. */
static inline int64_t get_clock(void)
{
#ifdef CLOCK_MONOTONIC
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif
db1a4972 205
87ecb68b
PB
206void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
207void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);
208
29e922b6 209/* icount */
29e922b6 210int64_t cpu_get_icount(void);
946fb27c 211int64_t cpu_get_clock(void);
29e922b6
BS
212
213/*******************************************/
214/* host CPU ticks (if available) */
215
/* Read the host CPU cycle counter.  One implementation per host
 * architecture; the final #else branch is a software fallback.
 * All inline assembly uses __asm__/__volatile__ spellings so this
 * header also compiles under strict ISO C modes (-std=c11), where
 * the plain "asm" keyword is unavailable in GCC. */
#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ ("mftb    %0\n\t"
                          "cmpwi   %0,0\n\t"
                          "beq-    $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr   %1,269\n\t"  /* mftbu */
                          "mfspr   %L0,268\n\t" /* mftb */
                          "mfspr   %0,269\n\t"  /* mftbu */
                          "cmpw    %0,%1\n\t"
                          "bne     $-16"        /* loop if mftbu changed */
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    /* "=A" places the 64-bit result in edx:eax. */
    __asm__ __volatile__ ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    __asm__ __volatile__ ("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    __asm__ __volatile__ ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    __asm__ __volatile__ ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    __asm__ __volatile__ ("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc__)

static inline int64_t cpu_get_real_ticks(void)
{
#if defined(_LP64)
    uint64_t rval;
    __asm__ __volatile__ ("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    /* We need an %o or %g register for this. For recent enough gcc
       there is an "h" constraint for that. Don't bother with that. */
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    __asm__ __volatile__ ("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1"
                          : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1");
    return rval.i64;
#endif
}

#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils wants to use rdhwr only on mips32r2
 * but as linux kernel emulate it, it's fine
 * to use it.
 *
 */
#define MIPS_RDHWR(rd, value) {                         \
        __asm__ __volatile__ (".set   push\n\t"         \
                              ".set mips32r2\n\t"       \
                              "rdhwr  %0, "rd"\n\t"     \
                              ".set   pop"              \
                              : "=r" (value));          \
    }

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    /* rpcc returns the 32-bit cycle counter in the low half and a
       per-process offset in the high half. */
    __asm__ __volatile__ ("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks(void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif
368
2d8ebcf9
RH
369#ifdef CONFIG_PROFILER
370static inline int64_t profile_getclock(void)
371{
372 return cpu_get_real_ticks();
373}
374
375extern int64_t qemu_time, qemu_time_start;
376extern int64_t tlb_flush_time;
377extern int64_t dev_time;
378#endif
379
87ecb68b 380#endif