#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu-common.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"

/* timers */

#define SCALE_MS 1000000
#define SCALE_US 1000
#define SCALE_NS 1

#define QEMU_CLOCK_REALTIME 0
#define QEMU_CLOCK_VIRTUAL  1
#define QEMU_CLOCK_HOST     2

typedef struct QEMUClock QEMUClock;
typedef void QEMUTimerCB(void *opaque);

/* The real time clock should be used only for stuff which does not
   change the virtual machine state, as it is run even if the virtual
   machine is stopped. The real time clock has a frequency of 1000
   Hz. */
extern QEMUClock *rt_clock;

/* The virtual clock is only run during the emulation. It is stopped
   when the virtual machine is stopped. Virtual timers use a high
   precision clock, usually cpu cycles (use ticks_per_sec). */
extern QEMUClock *vm_clock;

/* The host clock should be used for device models that emulate accurate
   real time sources. It will continue to run when the virtual machine
   is suspended, and it will reflect system time changes the host may
   undergo (e.g. due to NTP). The host clock has the same precision as
   the virtual clock. */
extern QEMUClock *host_clock;

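/*
 * Illustrative sketch (not part of the API surface): a device model reads
 * whichever clock matches its semantics.  These calls assume the clocks
 * have already been set up by init_clocks().
 *
 *     int64_t guest_ns = qemu_get_clock_ns(vm_clock);   // stops with the VM
 *     int64_t wall_ns  = qemu_get_clock_ns(rt_clock);   // always runs
 *     int64_t host_ns  = qemu_get_clock_ns(host_clock); // follows host time changes
 */
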
int64_t qemu_get_clock_ns(QEMUClock *clock);
int64_t qemu_clock_has_timers(QEMUClock *clock);
int64_t qemu_clock_expired(QEMUClock *clock);
int64_t qemu_clock_deadline(QEMUClock *clock);

/**
 * qemu_clock_deadline_ns:
 * @clock: the clock to operate on
 *
 * Calculate the timeout of the earliest expiring timer
 * in nanoseconds, or -1 if no timer is set to expire.
 *
 * Returns: time until expiry in nanoseconds or -1
 */
int64_t qemu_clock_deadline_ns(QEMUClock *clock);

/**
 * qemu_timeout_ns_to_ms:
 * @ns: nanosecond timeout value
 *
 * Convert a nanosecond timeout value (or -1) to
 * a millisecond value (or -1), always rounding up.
 *
 * Returns: millisecond timeout value
 */
int qemu_timeout_ns_to_ms(int64_t ns);

/**
 * qemu_poll_ns:
 * @fds: Array of file descriptors
 * @nfds: number of file descriptors
 * @timeout: timeout in nanoseconds
 *
 * Perform a poll like g_poll but with a timeout in nanoseconds.
 * See g_poll documentation for further details.
 *
 * Returns: number of fds ready
 */
int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout);
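
/*
 * Illustrative sketch (not an additional API): a wait loop typically
 * computes the earliest deadline and then blocks in qemu_poll_ns() for at
 * most that long.  "some_fd" below is a hypothetical descriptor.
 *
 *     GPollFD fds[1] = { { .fd = some_fd, .events = G_IO_IN } };
 *     int64_t timeout_ns = qemu_clock_deadline_ns(vm_clock);
 *     int ready = qemu_poll_ns(fds, 1, timeout_ns);
 *
 *     // For interfaces that take milliseconds, qemu_timeout_ns_to_ms()
 *     // rounds up, so a 1 ns timeout becomes 1 ms rather than a 0 ms spin.
 *     int timeout_ms = qemu_timeout_ns_to_ms(timeout_ns);
 */
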
void qemu_clock_enable(QEMUClock *clock, bool enabled);
void qemu_clock_warp(QEMUClock *clock);

void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier);
void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
                                          Notifier *notifier);

QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                          QEMUTimerCB *cb, void *opaque);
void qemu_free_timer(QEMUTimer *ts);
void qemu_del_timer(QEMUTimer *ts);
void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
bool timer_pending(QEMUTimer *ts);
bool timer_expired(QEMUTimer *timer_head, int64_t current_time);
uint64_t timer_expire_time_ns(QEMUTimer *ts);

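/*
 * Illustrative sketch: creating and arming a timer.  "MyDevice" and
 * my_timer_cb are hypothetical; only the qemu_* calls are real.  The
 * expire time is absolute, in the timer's scale (here nanoseconds via
 * qemu_new_timer_ns(), defined below).
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         MyDevice *s = opaque;
 *         // handle the tick, then re-arm 10 ms from the current virtual time
 *         qemu_mod_timer(s->timer,
 *                        qemu_get_clock_ns(vm_clock) + 10 * SCALE_MS);
 *     }
 *
 *     s->timer = qemu_new_timer_ns(vm_clock, my_timer_cb, s);
 *     qemu_mod_timer(s->timer, qemu_get_clock_ns(vm_clock) + 10 * SCALE_MS);
 *     ...
 *     qemu_del_timer(s->timer);   // disarm
 *     qemu_free_timer(s->timer);  // release
 */
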
/**
 * qemu_run_timers:
 * @clock: clock on which to operate
 *
 * Run all the timers associated with a clock.
 *
 * Returns: true if any timer ran.
 */
bool qemu_run_timers(QEMUClock *clock);

/**
 * qemu_run_all_timers:
 *
 * Run all the timers associated with every clock.
 *
 * Returns: true if any timer ran.
 */
bool qemu_run_all_timers(void);

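/*
 * Illustrative sketch: a plausible dispatch step after a wait such as the
 * qemu_poll_ns() example above returns.  Expired callbacks may arm new
 * timers, so deadlines should be recomputed before the next wait.
 *
 *     if (qemu_run_all_timers()) {
 *         // at least one callback ran; the earliest deadline may have changed
 *     }
 */
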
void configure_alarms(char const *opt);
void init_clocks(void);
int init_timer_alarm(void);

int64_t cpu_get_ticks(void);
void cpu_enable_ticks(void);
void cpu_disable_ticks(void);

/**
 * qemu_soonest_timeout:
 * @timeout1: first timeout in nanoseconds (or -1 for infinite)
 * @timeout2: second timeout in nanoseconds (or -1 for infinite)
 *
 * Calculates the soonest of two timeout values. -1 means infinite, which
 * is later than any other value.
 *
 * Returns: soonest timeout value in nanoseconds (or -1 for infinite)
 */
static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
{
    /* we can abuse the fact that -1 (which means infinite) is a maximal
     * value when cast to unsigned. As this is disgusting, it's kept in
     * one inline function.
     */
    return ((uint64_t) timeout1 < (uint64_t) timeout2) ? timeout1 : timeout2;
}

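/*
 * Worked examples (values are arbitrary): the unsigned comparison makes -1
 * behave as UINT64_MAX, so any finite timeout wins against "infinite".
 *
 *     qemu_soonest_timeout(5 * SCALE_MS, -1);           // -> 5000000 (5 ms)
 *     qemu_soonest_timeout(-1, -1);                     // -> -1 (infinite)
 *     qemu_soonest_timeout(2 * SCALE_US, 3 * SCALE_MS); // -> 2000 (2 us)
 */
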
static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_NS, cb, opaque);
}

static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_MS, cb, opaque);
}

static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
{
    return qemu_get_clock_ns(clock) / SCALE_MS;
}

static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}

/* real time host monotonic timer */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

/* Warning: don't insert tracepoints into these functions, they are
   also used by simpletrace backend and tracepoints would cause
   an infinite recursion! */
#ifdef _WIN32
extern int64_t clock_freq;

static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

extern int use_rt_clock;

static inline int64_t get_clock(void)
{
#ifdef CLOCK_MONOTONIC
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif

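/*
 * Illustrative sketch: get_clock() returns nanoseconds from a monotonic
 * host source when available, so elapsed time is a plain subtraction.
 * "do_work()" is a hypothetical placeholder.
 *
 *     int64_t t0 = get_clock();
 *     do_work();
 *     int64_t elapsed_ns = get_clock() - t0;
 */
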
void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);

/* icount */
int64_t cpu_get_icount(void);
int64_t cpu_get_clock(void);

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ ("mftb %0\n\t"
                          "cmpwi %0,0\n\t"
                          "beq- $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr %1,269\n\t" /* mftbu */
                          "mfspr %L0,268\n\t" /* mftb */
                          "mfspr %0,269\n\t" /* mftbu */
                          "cmpw %0,%1\n\t"
                          "bne $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    /* We need an %o or %g register for this. For recent enough gcc
       there is an "h" constraint for that. Don't bother with that. */
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1");
    return rval.i64;
#endif
}

#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils wants to use rdhwr only on mips32r2,
 * but as the Linux kernel emulates it, it's fine
 * to use it.
 */
#define MIPS_RDHWR(rd, value) { \
    __asm__ __volatile__ (".set push\n\t" \
                          ".set mips32r2\n\t" \
                          "rdhwr %0, "rd"\n\t" \
                          ".set pop" \
                          : "=r" (value)); \
}

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    asm volatile("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value. This will be
   totally wrong, but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
#endif

#endif