#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu-common.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"

/* timers */

#define SCALE_MS 1000000
#define SCALE_US 1000
#define SCALE_NS 1

#define QEMU_CLOCK_REALTIME 0
#define QEMU_CLOCK_VIRTUAL  1
#define QEMU_CLOCK_HOST     2

typedef struct QEMUClock QEMUClock;
typedef void QEMUTimerCB(void *opaque);

/* The real time clock should be used only for things that do not
   change the virtual machine state, as it runs even when the virtual
   machine is stopped.  The real time clock has a frequency of 1000
   Hz. */
extern QEMUClock *rt_clock;

/* The virtual clock is only run during emulation.  It is stopped
   when the virtual machine is stopped.  Virtual timers use a high
   precision clock, usually CPU cycles (see get_ticks_per_sec()). */
extern QEMUClock *vm_clock;

/* The host clock should be used for device models that emulate accurate
   real time sources.  It will continue to run when the virtual machine
   is suspended, and it will reflect system time changes the host may
   undergo (e.g. due to NTP).  The host clock has the same precision as
   the virtual clock. */
extern QEMUClock *host_clock;
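
/* Usage sketch (illustrative, not part of the original API): sampling
 * each clock.  Only the externs above and qemu_get_clock_ns() below are
 * real; the variable names are made up.
 *
 *     int64_t rt   = qemu_get_clock_ns(rt_clock);    // always advances
 *     int64_t vm   = qemu_get_clock_ns(vm_clock);    // stops with the guest
 *     int64_t host = qemu_get_clock_ns(host_clock);  // tracks host time (NTP)
 */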

int64_t qemu_get_clock_ns(QEMUClock *clock);
int64_t qemu_clock_has_timers(QEMUClock *clock);
int64_t qemu_clock_expired(QEMUClock *clock);
int64_t qemu_clock_deadline(QEMUClock *clock);

/**
 * qemu_clock_deadline_ns:
 * @clock: the clock to operate on
 *
 * Calculate the timeout of the earliest expiring timer
 * in nanoseconds, or -1 if no timer is set to expire.
 *
 * Returns: time until expiry in nanoseconds or -1
 */
int64_t qemu_clock_deadline_ns(QEMUClock *clock);

/**
 * qemu_timeout_ns_to_ms:
 * @ns: nanosecond timeout value
 *
 * Convert a nanosecond timeout value (or -1) to
 * a millisecond value (or -1), always rounding up.
 *
 * Returns: millisecond timeout value
 */
int qemu_timeout_ns_to_ms(int64_t ns);
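
/* Worked example of the round-up rule (illustrative): a timeout of
 * 1000001 ns converts to 2 ms rather than 1 ms, so a caller sleeping
 * in milliseconds can never wake up early; -1 (infinite) is passed
 * through unchanged.
 */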

/**
 * qemu_poll_ns:
 * @fds: Array of file descriptors
 * @nfds: number of file descriptors
 * @timeout: timeout in nanoseconds
 *
 * Perform a poll like g_poll but with a timeout in nanoseconds.
 * See g_poll documentation for further details.
 *
 * Returns: number of fds ready
 */
int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout);
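
/* Usage sketch (illustrative): wait up to 500 microseconds for input on
 * a descriptor.  GPollFD and G_IO_IN come from GLib; "some_fd" is a
 * made-up name.
 *
 *     GPollFD pfd = { .fd = some_fd, .events = G_IO_IN };
 *     int ready = qemu_poll_ns(&pfd, 1, 500 * (int64_t)SCALE_US);
 */
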
void qemu_clock_enable(QEMUClock *clock, bool enabled);
void qemu_clock_warp(QEMUClock *clock);

void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier);
void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
                                          Notifier *notifier);

QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                          QEMUTimerCB *cb, void *opaque);
void qemu_free_timer(QEMUTimer *ts);
void qemu_del_timer(QEMUTimer *ts);
void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
bool timer_pending(QEMUTimer *ts);
bool timer_expired(QEMUTimer *timer_head, int64_t current_time);
uint64_t timer_expire_time_ns(QEMUTimer *ts);
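
/* Lifecycle sketch (illustrative, not part of this header): arm a
 * one-shot 10 ms timer on the virtual clock.  "my_cb" and "my_state"
 * are made-up names; the functions are the ones declared above and the
 * inline wrappers defined further down.
 *
 *     static void my_cb(void *opaque)
 *     {
 *         // runs from the main loop when the timer expires;
 *         // re-arm here with qemu_mod_timer() for a periodic timer
 *     }
 *
 *     QEMUTimer *t = qemu_new_timer_ms(vm_clock, my_cb, my_state);
 *     qemu_mod_timer(t, qemu_get_clock_ms(vm_clock) + 10);
 *     ...
 *     qemu_del_timer(t);    // cancel if still pending
 *     qemu_free_timer(t);   // release it
 */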

void qemu_run_timers(QEMUClock *clock);
void qemu_run_all_timers(void);
void configure_alarms(char const *opt);
void init_clocks(void);
int init_timer_alarm(void);

int64_t cpu_get_ticks(void);
void cpu_enable_ticks(void);
void cpu_disable_ticks(void);

/**
 * qemu_soonest_timeout:
 * @timeout1: first timeout in nanoseconds (or -1 for infinite)
 * @timeout2: second timeout in nanoseconds (or -1 for infinite)
 *
 * Calculates the soonest of two timeout values. -1 means infinite, which
 * is later than any other value.
 *
 * Returns: soonest timeout value in nanoseconds (or -1 for infinite)
 */
static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
{
    /* we can abuse the fact that -1 (which means infinite) is a maximal
     * value when cast to unsigned. As this is disgusting, it's kept in
     * one inline function.
     */
    return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2;
}
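
/* Worked example (illustrative): qemu_soonest_timeout(-1, 100) == 100,
 * because (uint64_t)-1 is UINT64_MAX and thus never the smaller value,
 * while qemu_soonest_timeout(-1, -1) == -1 (still infinite).
 */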

static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_NS, cb, opaque);
}

static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_MS, cb, opaque);
}

static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
{
    return qemu_get_clock_ns(clock) / SCALE_MS;
}

static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}
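
/* Note: with get_ticks_per_sec() fixed at 1,000,000,000, one tick is
 * one nanosecond, and SCALE_MS/SCALE_US/SCALE_NS above are simply the
 * number of nanoseconds per unit.
 */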

/* host wall-clock time in nanoseconds; unlike get_clock() below, this
   is not monotonic and jumps when the host date changes */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

/* Warning: don't insert tracepoints into these functions, they are
   also used by the simpletrace backend and tracepoints would cause
   an infinite recursion! */
#ifdef _WIN32
extern int64_t clock_freq;

static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

extern int use_rt_clock;

static inline int64_t get_clock(void)
{
#ifdef CLOCK_MONOTONIC
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif
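
/* Usage sketch (illustrative): timing a host-side interval with the
 * monotonic clock.
 *
 *     int64_t t0 = get_clock();
 *     do_something();                        // made-up function
 *     int64_t elapsed_ns = get_clock() - t0;
 */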

/* save/load a timer's expiry state through a QEMUFile (savevm/migration) */
void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);

/* icount */
int64_t cpu_get_icount(void);
int64_t cpu_get_clock(void);

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ ("mftb %0\n\t"
                          "cmpwi %0,0\n\t"
                          "beq- $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr %1,269\n\t"  /* mftbu */
                          "mfspr %L0,268\n\t" /* mftb */
                          "mfspr %0,269\n\t"  /* mftbu */
                          "cmpw %0,%1\n\t"
                          "bne $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    /* We need an %o or %g register for this.  For recent enough gcc
       there is an "h" constraint for that.  Don't bother with that. */
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1");
    return rval.i64;
#endif
}

#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils only accepts rdhwr when targeting mips32r2, but since the
 * Linux kernel emulates the instruction on older cores it is fine to
 * use it there as well.
 */
#define MIPS_RDHWR(rd, value) {                       \
        __asm__ __volatile__ (".set push\n\t"         \
                              ".set mips32r2\n\t"     \
                              "rdhwr %0, "rd"\n\t"    \
                              ".set pop"              \
                              : "=r" (value));        \
    }

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    /* rpcc packs the raw 32-bit cycle counter in the low half of the
       result and a per-process offset in the high half; combining the
       two yields a per-process cycle count (which wraps at 2^32). */
    asm volatile("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
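
/* Usage sketch (illustrative): accounting a stretch of work against one
 * of the counters above.  "run_device_code" is a made-up function.
 *
 *     int64_t ti = profile_getclock();
 *     run_device_code();
 *     dev_time += profile_getclock() - ti;
 */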
#endif

#endif