#ifndef QEMU_TIMER_H
#define QEMU_TIMER_H

#include "qemu-common.h"
#include "main-loop.h"
#include "notify.h"

#ifdef __FreeBSD__
#include <sys/param.h>
#endif

/* timers */

#define SCALE_MS 1000000
#define SCALE_US 1000
#define SCALE_NS 1

typedef struct QEMUClock QEMUClock;
typedef void QEMUTimerCB(void *opaque);

/* The real time clock should be used only for stuff which does not
   change the virtual machine state, as it is run even if the virtual
   machine is stopped. The real time clock has a frequency of 1000
   Hz. */
extern QEMUClock *rt_clock;

/* The virtual clock is only run during the emulation. It is stopped
   when the virtual machine is stopped. Virtual timers use a high
   precision clock, usually cpu cycles (use get_ticks_per_sec()). */
extern QEMUClock *vm_clock;

/* The host clock should be used for device models that emulate accurate
   real time sources. It will continue to run when the virtual machine
   is suspended, and it will reflect system time changes the host may
   undergo (e.g. due to NTP). The host clock has the same precision as
   the virtual clock. */
extern QEMUClock *host_clock;

int64_t qemu_get_clock_ns(QEMUClock *clock);
int64_t qemu_clock_has_timers(QEMUClock *clock);
int64_t qemu_clock_expired(QEMUClock *clock);
int64_t qemu_clock_deadline(QEMUClock *clock);
void qemu_clock_enable(QEMUClock *clock, bool enabled);
void qemu_clock_warp(QEMUClock *clock);

void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier);
void qemu_unregister_clock_reset_notifier(QEMUClock *clock,
                                          Notifier *notifier);

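/* Example (illustrative sketch, not taken from any particular device; the
   MyRTCState type and callback name are hypothetical): a device that
   emulates a real time source can ask to be notified when the host clock
   jumps backwards, so it can recompute its timer deadlines:

       static void my_rtc_clock_reset(Notifier *notifier, void *data)
       {
           MyRTCState *s = container_of(notifier, MyRTCState, clock_reset);
           ... re-arm timers against qemu_get_clock_ns(host_clock) ...
       }

       s->clock_reset.notify = my_rtc_clock_reset;
       qemu_register_clock_reset_notifier(host_clock, &s->clock_reset);
 */
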
QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
                          QEMUTimerCB *cb, void *opaque);
void qemu_free_timer(QEMUTimer *ts);
void qemu_del_timer(QEMUTimer *ts);
void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time);
void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time);
bool qemu_timer_pending(QEMUTimer *ts);
bool qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time);
uint64_t qemu_timer_expire_time_ns(QEMUTimer *ts);

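/* Example (illustrative sketch; MyDevState, my_dev_tick and the 10 ms period
   are hypothetical, not part of this API): a typical periodic device timer on
   the virtual clock is created once and re-armed from its own callback:

       static void my_dev_tick(void *opaque)
       {
           MyDevState *s = opaque;
           ... do the periodic work ...
           qemu_mod_timer(s->tick_timer,
                          qemu_get_clock_ns(vm_clock) + 10 * SCALE_MS);
       }

       s->tick_timer = qemu_new_timer_ns(vm_clock, my_dev_tick, s);
       qemu_mod_timer(s->tick_timer,
                      qemu_get_clock_ns(vm_clock) + 10 * SCALE_MS);
 */
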
void qemu_run_timers(QEMUClock *clock);
void qemu_run_all_timers(void);
void configure_alarms(char const *opt);
void init_clocks(void);
int init_timer_alarm(void);

int64_t cpu_get_ticks(void);
void cpu_enable_ticks(void);
void cpu_disable_ticks(void);

static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_NS, cb, opaque);
}

static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock, QEMUTimerCB *cb,
                                           void *opaque)
{
    return qemu_new_timer(clock, SCALE_MS, cb, opaque);
}

static inline int64_t qemu_get_clock_ms(QEMUClock *clock)
{
    return qemu_get_clock_ns(clock) / SCALE_MS;
}

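/* The scale given to qemu_new_timer() sets the unit in which the expire
   times passed to qemu_mod_timer() are interpreted. A sketch of the intended
   relationship (cb and opaque are placeholders):

       QEMUTimer *t = qemu_new_timer_ms(rt_clock, cb, opaque);
       qemu_mod_timer(t, qemu_get_clock_ms(rt_clock) + 100);

   arms t roughly 100 ms from now, just as a SCALE_NS timer armed with
   qemu_get_clock_ns(rt_clock) + 100 * SCALE_MS would be. */
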
static inline int64_t get_ticks_per_sec(void)
{
    return 1000000000LL;
}

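/* get_ticks_per_sec() is the nominal resolution of the nanosecond clocks
   (1e9 ticks per second). A common conversion pattern, sketched here with
   hypothetical variables cycles and freq (a device frequency in Hz), turns
   a cycle count into a nanosecond delay:

       int64_t delay_ns = muldiv64(cycles, get_ticks_per_sec(), freq);
 */
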
/* host real time clock in nanoseconds (based on gettimeofday(), hence
   not monotonic; see get_clock() below for a monotonic source) */
static inline int64_t get_clock_realtime(void)
{
    struct timeval tv;

    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000000LL + (tv.tv_usec * 1000);
}

/* Warning: don't insert tracepoints into these functions, they are
   also used by the simpletrace backend and tracepoints would cause
   an infinite recursion! */
#ifdef _WIN32
extern int64_t clock_freq;

static inline int64_t get_clock(void)
{
    LARGE_INTEGER ti;
    QueryPerformanceCounter(&ti);
    return muldiv64(ti.QuadPart, get_ticks_per_sec(), clock_freq);
}

#else

extern int use_rt_clock;

static inline int64_t get_clock(void)
{
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 500000) \
    || defined(__DragonFly__) || defined(__FreeBSD_kernel__)
    if (use_rt_clock) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    } else
#endif
    {
        /* XXX: using gettimeofday leads to problems if the date
           changes, so it should be avoided. */
        return get_clock_realtime();
    }
}
#endif

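/* Both of the helpers above return nanoseconds, so a host-side elapsed-time
   measurement is simply (illustrative sketch; do_something() is a
   placeholder):

       int64_t t0 = get_clock();
       do_something();
       fprintf(stderr, "took %" PRId64 " ns\n", get_clock() - t0);
 */
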
void qemu_get_timer(QEMUFile *f, QEMUTimer *ts);
void qemu_put_timer(QEMUFile *f, QEMUTimer *ts);

/* icount */
int64_t cpu_get_icount(void);
int64_t cpu_get_clock(void);

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ ("mftb %0\n\t"
                          "cmpwi %0,0\n\t"
                          "beq- $-8"
                          : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ ("mfspr %1,269\n\t"  /* mftbu */
                          "mfspr %L0,268\n\t" /* mftb */
                          "mfspr %0,269\n\t"  /* mftbu */
                          "cmpw %0,%1\n\t"
                          "bne $-16"
                          : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    /* We need an %o or %g register for this. For recent enough gcc
       there is an "h" constraint for that. Don't bother with that. */
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%%g1; srlx %%g1,32,%0; mov %%g1,%1"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low) : : "g1");
    return rval.i64;
#endif
}

#elif defined(__mips__) && \
    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
/*
 * binutils wants to use rdhwr only on mips32r2,
 * but as the Linux kernel emulates it, it's fine
 * to use it.
 */
#define MIPS_RDHWR(rd, value) {                       \
    __asm__ __volatile__ (".set push\n\t"             \
                          ".set mips32r2\n\t"         \
                          "rdhwr %0, "rd"\n\t"        \
                          ".set pop"                  \
                          : "=r" (value));            \
    }

static inline int64_t cpu_get_real_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count) {
        MIPS_RDHWR("$3", cyc_per_count);
    }

    MIPS_RDHWR("$2", count);
    return (int64_t)(count * cyc_per_count);
}

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;

    asm volatile("rpcc %0" : "=r"(cc));
    cur = cc;
    ofs = cc >> 32;
    return cur - ofs;
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value. This will be
   totally wrong, but hopefully better than nothing. */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t dev_time;
#endif

#endif