#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu-common.h"
#include "qemu/main-loop.h"
#include "qemu/notify.h"

#ifdef __FreeBSD__
#include <sys/param.h>
#endif

/* timers */

#define SCALE_MS 1000000
#define SCALE_US 1000
#define SCALE_NS 1

typedef struct QEMUClock QEMUClock;
typedef void QEMUTimerCB(void *opaque);

/* The real time clock should be used only for stuff which does not
   change the virtual machine state, as it is run even if the virtual
   machine is stopped.  The real time clock has a frequency of 1000
   Hz. */
extern QEMUClock *rt_clock;

/* The virtual clock is only run during the emulation.  It is stopped
   when the virtual machine is stopped.  Virtual timers use a high
   precision clock, usually cpu cycles (see get_ticks_per_sec()). */
extern QEMUClock *vm_clock;

/* The host clock should be used for device models that emulate accurate
   real time sources.  It will continue to run when the virtual machine
   is suspended, and it will reflect system time changes the host may
   undergo (e.g. due to NTP).  The host clock has the same precision as
   the virtual clock. */
extern QEMUClock *host_clock;

int64_t qemu_get_clock_ns(QEMUClock *clock);
int64_t qemu_clock_has_timers(QEMUClock *clock);
int64_t qemu_clock_expired(QEMUClock *clock);
int64_t qemu_clock_deadline(QEMUClock *clock);
void qemu_clock_enable(QEMUClock *clock, bool enabled);
void qemu_clock_warp(QEMUClock *clock);

void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier);
void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
                                          Notifier *notifier);
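
/*
 * Illustrative sketch (not part of this header): reading a clock and
 * watching host_clock for backward jumps.  The notifier, callback and
 * setup function below are hypothetical; the notifier payload is
 * assumed to point at the new host clock value (an int64_t).
 *
 *     static Notifier clock_reset_notifier;
 *
 *     static void my_clock_reset(Notifier *notifier, void *data)
 *     {
 *         int64_t new_host_ns = *(int64_t *)data;
 *         // the host clock jumped backwards; re-sync wall-clock state here
 *     }
 *
 *     static void my_setup(void)
 *     {
 *         clock_reset_notifier.notify = my_clock_reset;
 *         qemu_register_clock_reset_notifier(host_clock,
 *                                            &clock_reset_notifier);
 *     }
 *
 * qemu_get_clock_ns(vm_clock) returns the current guest time in
 * nanoseconds, and qemu_clock_enable(vm_clock, false) keeps timers on
 * that clock from firing until it is re-enabled.
 */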

QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                          QEMUTimerCB *cb, void *opaque);
void qemu_free_timer(QEMUTimer *ts);
void qemu_del_timer(QEMUTimer *ts);
void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
bool qemu_timer_pending(QEMUTimer *ts);
bool qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts);
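
/*
 * Illustrative sketch (not part of this header): a self-re-arming timer
 * on vm_clock firing every 10 ms of guest time.  The MyDev structure
 * and function names are hypothetical.
 *
 *     typedef struct MyDev { QEMUTimer *tick; } MyDev;
 *
 *     static void my_tick(void *opaque)
 *     {
 *         MyDev *d = opaque;
 *         // timers made with qemu_new_timer_ns() take expire times in ns
 *         qemu_mod_timer(d->tick, qemu_get_clock_ns(vm_clock) + 10 * SCALE_MS);
 *     }
 *
 *     static void my_init(MyDev *d)
 *     {
 *         d->tick = qemu_new_timer_ns(vm_clock, my_tick, d);
 *         qemu_mod_timer(d->tick, qemu_get_clock_ns(vm_clock) + 10 * SCALE_MS);
 *     }
 *
 *     static void my_fini(MyDev *d)
 *     {
 *         qemu_del_timer(d->tick);   // stop the timer first
 *         qemu_free_timer(d->tick);  // then release it
 *     }
 */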

void qemu_run_timers(QEMUClock *clock);
void qemu_run_all_timers(void);
void configure_alarms(char const *opt);
void init_clocks(void);
int init_timer_alarm(void);

int64_t cpu_get_ticks(void);
void cpu_enable_ticks(void);
void cpu_disable_ticks(void);

static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_NS, cb, opaque);
}

static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_MS, cb, opaque);
}

static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
{
    return qemu_get_clock_ns(clock) / SCALE_MS;
}

static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}
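
/*
 * Illustrative note: the expire time given to qemu_mod_timer() is taken
 * in the scale the timer was created with, so a SCALE_MS timer pairs
 * with qemu_get_clock_ms().  't', 'cb' and 'opaque' below are
 * placeholders.
 *
 *     QEMUTimer *t = qemu_new_timer_ms(rt_clock, cb, opaque);
 *     qemu_mod_timer(t, qemu_get_clock_ms(rt_clock) + 250);  // ~250 ms
 *
 * For sub-millisecond resolution use qemu_new_timer_ns() together with
 * qemu_get_clock_ns() instead.
 */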

/* real time host clock, based on gettimeofday(); it follows host time
   changes and is therefore not monotonic */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

/* Warning: don't insert tracepoints into these functions; they are
   also used by the simpletrace backend, and tracepoints would cause
   infinite recursion! */
#ifdef _WIN32
extern int64_t clock_freq;

static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

extern int use_rt_clock;

static inline int64_t get_clock(void)
{
#ifdef CLOCK_MONOTONIC
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif

void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);
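
/*
 * Illustrative sketch: migrating a timer's expiry from a device's
 * save/load handlers.  The handler and field names are hypothetical,
 * and devices typically use the VMSTATE_TIMER() wrapper rather than
 * calling these directly.
 *
 *     static void my_save(QEMUFile *f, void *opaque)
 *     {
 *         MyDev *d = opaque;
 *         qemu_put_timer(f, d->tick);   // records the pending expire time
 *     }
 *
 *     static int my_load(QEMUFile *f, void *opaque, int version_id)
 *     {
 *         MyDev *d = opaque;
 *         qemu_get_timer(f, d->tick);   // re-arms the timer if it was pending
 *         return 0;
 *     }
 */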

/* icount */
int64_t cpu_get_icount(void);
int64_t cpu_get_clock(void);

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ ("mftb    %0\n\t"
                          "cmpwi   %0,0\n\t"
                          "beq-    $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr   %1,269\n\t"  /* mftbu */
                          "mfspr   %L0,268\n\t" /* mftb */
                          "mfspr   %0,269\n\t"  /* mftbu */
                          "cmpw    %0,%1\n\t"
                          "bne     $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    /* We need an %o or %g register for this.  For recent enough gcc
       there is an "h" constraint for that.  Don't bother with that. */
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1");
    return rval.i64;
#endif
}

#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils only assembles rdhwr when targeting mips32r2, but since the
 * Linux kernel emulates the instruction it is fine to use it elsewhere.
 */
#define MIPS_RDHWR(rd, value) {                         \
        __asm__ __volatile__ (".set   push\n\t"         \
                              ".set mips32r2\n\t"       \
                              "rdhwr  %0, "rd"\n\t"     \
                              ".set   pop"              \
                              : "=r" (value));          \
    }

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    asm volatile("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
#endif

#endif