#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu-common.h"
#include <time.h>
#include <sys/time.h>

#ifdef _WIN32
#include <windows.h>
#include <mmsystem.h>
#endif

/* timers */

#define SCALE_MS 1000000
#define SCALE_US 1000
#define SCALE_NS 1

typedef struct QEMUClock QEMUClock;
typedef void QEMUTimerCB(void *opaque);

/* The real time clock should be used only for functionality that does
   not change the virtual machine state, as it runs even when the
   virtual machine is stopped.  The real time clock has a frequency of
   1000 Hz. */
extern QEMUClock *rt_clock;

/* The virtual clock only runs during emulation.  It is stopped when
   the virtual machine is stopped.  Virtual timers use a high precision
   clock, usually CPU cycles (see get_ticks_per_sec()). */
extern QEMUClock *vm_clock;

/* The host clock should be used for device models that emulate accurate
   real time sources.  It will continue to run when the virtual machine
   is suspended, and it will reflect system time changes the host may
   undergo (e.g. due to NTP).  The host clock has the same precision as
   the virtual clock. */
extern QEMUClock *host_clock;

int64_t qemu_get_clock_ns(QEMUClock *clock);
void qemu_clock_enable(QEMUClock *clock, int enabled);
void qemu_clock_warp(QEMUClock *clock);
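
/* Example (illustrative sketch, not part of the API): reading the
   current time of a clock, in nanoseconds or milliseconds via the
   scale helpers defined further down in this header:

       int64_t now_ns = qemu_get_clock_ns(vm_clock);
       int64_t now_ms = qemu_get_clock_ms(rt_clock);
 */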

QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                          QEMUTimerCB *cb, void *opaque);
void qemu_free_timer(QEMUTimer *ts);
void qemu_del_timer(QEMUTimer *ts);
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
int qemu_timer_pending(QEMUTimer *ts);
int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
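
/* Typical usage (illustrative sketch; my_cb and my_state are
   hypothetical names): create a timer on a clock, then arm it with an
   absolute deadline on that same clock.  The callback fires once; to
   get a periodic timer, call qemu_mod_timer() again from the callback.

       QEMUTimer *t = qemu_new_timer_ns(vm_clock, my_cb, my_state);
       qemu_mod_timer(t, qemu_get_clock_ns(vm_clock) + 1000000);
 */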

void qemu_run_all_timers(void);
int qemu_alarm_pending(void);
int64_t qemu_next_icount_deadline(void);
void configure_alarms(char const *opt);
void configure_icount(const char *option);
int qemu_calculate_timeout(void);
void init_clocks(void);
int init_timer_alarm(void);
void quit_timers(void);
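
/* Rough startup sequence (illustrative sketch; the authoritative code
   lives in vl.c, and "dynticks" is just one alarm timer name):

       init_clocks();
       configure_alarms("dynticks");
       if (init_timer_alarm() < 0) {
           fprintf(stderr, "could not initialize alarm timer\n");
           exit(1);
       }
 */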

static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_NS, cb, opaque);
}

static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_MS, cb, opaque);
}

static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
{
    return qemu_get_clock_ns(clock) / SCALE_MS;
}

static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}

/* real time host clock, in ns; follows the host wall-clock time, so it
   is affected by system time changes */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

/* Warning: don't insert tracepoints into these functions, they are
   also used by the simpletrace backend and tracepoints would cause
   infinite recursion! */
#ifdef _WIN32
extern int64_t clock_freq;

static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

extern int use_rt_clock;

static inline int64_t get_clock(void)
{
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
    || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif
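
/* Example (illustrative sketch; do_work() is a hypothetical function):
   measuring a host-side interval with the monotonic clock above.
   get_clock() returns nanoseconds, i.e. get_ticks_per_sec() units
   per second:

       int64_t t0 = get_clock();
       do_work();
       int64_t elapsed_ns = get_clock() - t0;
 */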

void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);

/* ptimer.c */
typedef struct ptimer_state ptimer_state;
typedef void (*ptimer_cb)(void *opaque);

ptimer_state *ptimer_init(QEMUBH *bh);
void ptimer_set_period(ptimer_state *s, int64_t period);
void ptimer_set_freq(ptimer_state *s, uint32_t freq);
void ptimer_set_limit(ptimer_state *s, uint64_t limit, int reload);
uint64_t ptimer_get_count(ptimer_state *s);
void ptimer_set_count(ptimer_state *s, uint64_t count);
void ptimer_run(ptimer_state *s, int oneshot);
void ptimer_stop(ptimer_state *s);
void qemu_put_ptimer(QEMUFile *f, ptimer_state *s);
void qemu_get_ptimer(QEMUFile *f, ptimer_state *s);
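
/* Typical device usage (illustrative sketch; my_tick and s are
   hypothetical names): a ptimer counts down at a programmed frequency
   and runs a bottom half whenever it reaches zero.  Passing reload=1
   to ptimer_set_limit() and oneshot=0 to ptimer_run() gives a
   periodic timer:

       QEMUBH *bh = qemu_bh_new(my_tick, s);
       ptimer_state *pt = ptimer_init(bh);
       ptimer_set_freq(pt, 1000000);
       ptimer_set_limit(pt, 1000, 1);
       ptimer_run(pt, 0);
 */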

/* icount */
int64_t qemu_icount_round(int64_t count);
extern int64_t qemu_icount;
extern int use_icount;
extern int icount_time_shift;
extern int64_t qemu_icount_bias;
int64_t cpu_get_icount(void);
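
/* With -icount, vm_clock time is derived from the number of executed
   guest instructions instead of the host clock.  A sketch of the
   relation (the authoritative computation is cpu_get_icount() in
   cpus.c):

       ns = qemu_icount_bias + (executed_instructions << icount_time_shift)

   i.e. each instruction accounts for 2^icount_time_shift nanoseconds
   of virtual time. */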

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads the timebase in one 64-bit go and includes the Cell
       workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
     */
    __asm__ __volatile__ ("mftb    %0\n\t"
                          "cmpwi   %0,0\n\t"
                          "beq-    $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr   %1,269\n\t"  /* mftbu */
                          "mfspr   %L0,268\n\t" /* mftb */
                          "mfspr   %0,269\n\t"  /* mftbu */
                          "cmpw    %0,%1\n\t"
                          "bne     $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks(void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}

#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils only accepts rdhwr when targeting mips32r2, but since the
 * Linux kernel emulates the instruction on older cores, it is fine
 * to use it there as well.
 */
#define MIPS_RDHWR(rd, value) {                         \
        __asm__ __volatile__ (".set push\n\t"           \
                              ".set mips32r2\n\t"       \
                              "rdhwr %0, "rd"\n\t"      \
                              ".set pop"                \
                              : "=r" (value));          \
    }

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    asm volatile("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks(void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

#ifdef NEED_CPU_H
/* Deterministic execution requires that IO only be performed on the last
   instruction of a TB so that interrupts take effect immediately. */
static inline int can_do_io(CPUState *env)
{
    if (!use_icount) {
        return 1;
    }
    /* If not executing code then assume we are ok. */
    if (!env->current_tb) {
        return 1;
    }
    return env->can_do_io != 0;
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
#endif
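
/* Example (illustrative sketch; do_work() is hypothetical): the
   profiler counters above accumulate intervals measured with
   profile_getclock():

       int64_t ti = profile_getclock();
       do_work();
       qemu_time += profile_getclock() - ti;
 */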

#endif