/*
 * QEMU timer API declarations (include/qemu/timer.h).
 * Source viewed via git blame on mirror_qemu.git; blame annotations
 * in this dump are extraction artifacts, not part of the header.
 */
1#ifndef QEMU_TIMER_H
2#define QEMU_TIMER_H
3
29e922b6 4#include "qemu-common.h"
1de7afc9
PB
5#include "qemu/main-loop.h"
6#include "qemu/notify.h"
29e922b6 7
87ecb68b
PB
8/* timers */
9
0ce1b948
PB
10#define SCALE_MS 1000000
11#define SCALE_US 1000
12#define SCALE_NS 1
13
58ac56b9
AB
14#define QEMU_CLOCK_REALTIME 0
15#define QEMU_CLOCK_VIRTUAL 1
16#define QEMU_CLOCK_HOST 2
17
87ecb68b
PB
18typedef struct QEMUClock QEMUClock;
19typedef void QEMUTimerCB(void *opaque);
20
21/* The real time clock should be used only for stuff which does not
22 change the virtual machine state, as it is run even if the virtual
23 machine is stopped. The real time clock has a frequency of 1000
24 Hz. */
25extern QEMUClock *rt_clock;
26
27/* The virtual clock is only run during the emulation. It is stopped
28 when the virtual machine is stopped. Virtual timers use a high
29 precision clock, usually cpu cycles (use ticks_per_sec). */
30extern QEMUClock *vm_clock;
31
21d5d12b
JK
32/* The host clock should be use for device models that emulate accurate
33 real time sources. It will continue to run when the virtual machine
34 is suspended, and it will reflect system time changes the host may
35 undergo (e.g. due to NTP). The host clock has the same precision as
36 the virtual clock. */
37extern QEMUClock *host_clock;
38
41c872b6 39int64_t qemu_get_clock_ns(QEMUClock *clock);
dc2dfcf0
PB
40int64_t qemu_clock_has_timers(QEMUClock *clock);
41int64_t qemu_clock_expired(QEMUClock *clock);
42int64_t qemu_clock_deadline(QEMUClock *clock);
02a03a9f
AB
43
44/**
45 * qemu_clock_deadline_ns:
46 * @clock: the clock to operate on
47 *
48 * Calculate the timeout of the earliest expiring timer
49 * in nanoseconds, or -1 if no timer is set to expire.
50 *
51 * Returns: time until expiry in nanoseconds or -1
52 */
53int64_t qemu_clock_deadline_ns(QEMUClock *clock);
54
55/**
56 * qemu_timeout_ns_to_ms:
57 * @ns: nanosecond timeout value
58 *
59 * Convert a nanosecond timeout value (or -1) to
60 * a millisecond value (or -1), always rounding up.
61 *
62 * Returns: millisecond timeout value
63 */
64int qemu_timeout_ns_to_ms(int64_t ns);
65
4e0c6529
AB
66/**
67 * qemu_poll_ns:
68 * @fds: Array of file descriptors
69 * @nfds: number of file descriptors
70 * @timeout: timeout in nanoseconds
71 *
72 * Perform a poll like g_poll but with a timeout in nanoseconds.
73 * See g_poll documentation for further details.
74 *
75 * Returns: number of fds ready
76 */
77int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout);
5e1ec7b2 78void qemu_clock_enable(QEMUClock *clock, bool enabled);
ab33fcda 79void qemu_clock_warp(QEMUClock *clock);
87ecb68b 80
691a0c9c
JK
81void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier);
82void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
83 Notifier *notifier);
84
4a998740
PB
85QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
86 QEMUTimerCB *cb, void *opaque);
87ecb68b
PB
87void qemu_free_timer(QEMUTimer *ts);
88void qemu_del_timer(QEMUTimer *ts);
2ff68d07 89void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
87ecb68b 90void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
e93379b0
AB
91bool timer_pending(QEMUTimer *ts);
92bool timer_expired(QEMUTimer *timer_head, int64_t current_time);
93uint64_t timer_expire_time_ns(QEMUTimer *ts);
87ecb68b 94
8156be56 95void qemu_run_timers(QEMUClock *clock);
db1a4972 96void qemu_run_all_timers(void);
db1a4972 97void configure_alarms(char const *opt);
db1a4972
PB
98void init_clocks(void);
99int init_timer_alarm(void);
db1a4972 100
70c3b557
BS
101int64_t cpu_get_ticks(void);
102void cpu_enable_ticks(void);
103void cpu_disable_ticks(void);
104
02a03a9f
AB
105/**
106 * qemu_soonest_timeout:
107 * @timeout1: first timeout in nanoseconds (or -1 for infinite)
108 * @timeout2: second timeout in nanoseconds (or -1 for infinite)
109 *
110 * Calculates the soonest of two timeout values. -1 means infinite, which
111 * is later than any other value.
112 *
113 * Returns: soonest timeout value in nanoseconds (or -1 for infinite)
114 */
115static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
116{
117 /* we can abuse the fact that -1 (which means infinite) is a maximal
118 * value when cast to unsigned. As this is disgusting, it's kept in
119 * one inline function.
120 */
121 return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2;
122}
123
0ce1b948
PB
124static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
125 void *opaque)
126{
4a998740 127 return qemu_new_timer(clock, SCALE_NS, cb, opaque);
0ce1b948
PB
128}
129
130static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
131 void *opaque)
132{
4a998740 133 return qemu_new_timer(clock, SCALE_MS, cb, opaque);
0ce1b948
PB
134}
135
136static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
137{
138 return qemu_get_clock_ns(clock) / SCALE_MS;
139}
140
274dfed8
AL
141static inline int64_t get_ticks_per_sec(void)
142{
143 return 1000000000LL;
144}
87ecb68b 145
c57c846a
BS
146/* real time host monotonic timer */
147static inline int64_t get_clock_realtime(void)
148{
149 struct timeval tv;
150
151 gettimeofday(&tv, NULL);
152 return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
153}
154
155/* Warning: don't insert tracepoints into these functions, they are
156 also used by simpletrace backend and tracepoints would cause
157 an infinite recursion! */
158#ifdef _WIN32
159extern int64_t clock_freq;
160
161static inline int64_t get_clock(void)
162{
163 LARGE_INTEGER ti;
164 QueryPerformanceCounter(&ti);
165 return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
166}
167
168#else
169
170extern int use_rt_clock;
171
172static inline int64_t get_clock(void)
173{
d05ef160 174#ifdef CLOCK_MONOTONIC
c57c846a
BS
175 if (use_rt_clock) {
176 struct timespec ts;
177 clock_gettime(CLOCK_MONOTONIC, &ts);
178 return ts.tv_sec * 1000000000LL + ts.tv_nsec;
179 } else
180#endif
181 {
182 /* XXX: using gettimeofday leads to problems if the date
183 changes, so it should be avoided. */
184 return get_clock_realtime();
185 }
186}
187#endif
db1a4972 188
87ecb68b
PB
189void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
190void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);
191
29e922b6 192/* icount */
29e922b6 193int64_t cpu_get_icount(void);
946fb27c 194int64_t cpu_get_clock(void);
29e922b6
BS
195
196/*******************************************/
197/* host CPU ticks (if available) */
198
199#if defined(_ARCH_PPC)
200
201static inline int64_t cpu_get_real_ticks(void)
202{
203 int64_t retval;
204#ifdef _ARCH_PPC64
205 /* This reads timebase in one 64bit go and includes Cell workaround from:
206 http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
207 */
208 __asm__ __volatile__ ("mftb %0\n\t"
209 "cmpwi %0,0\n\t"
210 "beq- $-8"
211 : "=r" (retval));
212#else
213 /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
214 unsigned long junk;
4a9590f3
AG
215 __asm__ __volatile__ ("mfspr %1,269\n\t" /* mftbu */
216 "mfspr %L0,268\n\t" /* mftb */
217 "mfspr %0,269\n\t" /* mftbu */
29e922b6
BS
218 "cmpw %0,%1\n\t"
219 "bne $-16"
220 : "=r" (retval), "=r" (junk));
221#endif
222 return retval;
223}
224
225#elif defined(__i386__)
226
227static inline int64_t cpu_get_real_ticks(void)
228{
229 int64_t val;
230 asm volatile ("rdtsc" : "=A" (val));
231 return val;
232}
233
234#elif defined(__x86_64__)
235
236static inline int64_t cpu_get_real_ticks(void)
237{
238 uint32_t low,high;
239 int64_t val;
240 asm volatile("rdtsc" : "=a" (low), "=d" (high));
241 val = high;
242 val <<= 32;
243 val |= low;
244 return val;
245}
246
247#elif defined(__hppa__)
248
249static inline int64_t cpu_get_real_ticks(void)
250{
251 int val;
252 asm volatile ("mfctl %%cr16, %0" : "=r"(val));
253 return val;
254}
255
256#elif defined(__ia64)
257
258static inline int64_t cpu_get_real_ticks(void)
259{
260 int64_t val;
261 asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
262 return val;
263}
264
265#elif defined(__s390__)
266
267static inline int64_t cpu_get_real_ticks(void)
268{
269 int64_t val;
270 asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
271 return val;
272}
273
9b9c37c3 274#elif defined(__sparc__)
29e922b6
BS
275
276static inline int64_t cpu_get_real_ticks (void)
277{
278#if defined(_LP64)
279 uint64_t rval;
280 asm volatile("rd %%tick,%0" : "=r"(rval));
281 return rval;
282#else
9b9c37c3
RH
283 /* We need an %o or %g register for this. For recent enough gcc
284 there is an "h" constraint for that. Don't bother with that. */
29e922b6
BS
285 union {
286 uint64_t i64;
287 struct {
288 uint32_t high;
289 uint32_t low;
290 } i32;
291 } rval;
9b9c37c3
RH
292 asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1"
293 : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1");
29e922b6
BS
294 return rval.i64;
295#endif
296}
297
298#elif defined(__mips__) && \
299 ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
300/*
301 * binutils wants to use rdhwr only on mips32r2
302 * but as linux kernel emulate it, it's fine
303 * to use it.
304 *
305 */
306#define MIPS_RDHWR(rd, value) { \
307 __asm__ __volatile__ (".set push\n\t" \
308 ".set mips32r2\n\t" \
309 "rdhwr %0, "rd"\n\t" \
310 ".set pop" \
311 : "=r" (value)); \
312 }
313
314static inline int64_t cpu_get_real_ticks(void)
315{
316 /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
317 uint32_t count;
318 static uint32_t cyc_per_count = 0;
319
320 if (!cyc_per_count) {
321 MIPS_RDHWR("$3", cyc_per_count);
322 }
323
324 MIPS_RDHWR("$2", count);
325 return (int64_t)(count * cyc_per_count);
326}
327
14a6063a
RH
328#elif defined(__alpha__)
329
330static inline int64_t cpu_get_real_ticks(void)
331{
332 uint64_t cc;
333 uint32_t cur, ofs;
334
335 asm volatile("rpcc %0" : "=r"(cc));
336 cur = cc;
337 ofs = cc >> 32;
338 return cur - ofs;
339}
340
29e922b6
BS
341#else
342/* The host CPU doesn't have an easily accessible cycle counter.
343 Just return a monotonically increasing value. This will be
344 totally wrong, but hopefully better than nothing. */
345static inline int64_t cpu_get_real_ticks (void)
346{
347 static int64_t ticks = 0;
348 return ticks++;
349}
350#endif
351
2d8ebcf9
RH
352#ifdef CONFIG_PROFILER
353static inline int64_t profile_getclock(void)
354{
355 return cpu_get_real_ticks();
356}
357
358extern int64_t qemu_time, qemu_time_start;
359extern int64_t tlb_flush_time;
360extern int64_t dev_time;
361#endif
362
87ecb68b 363#endif