/*
 * QEMU timer/tick state
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
25 #include "qemu/osdep.h"
26 #include "qemu-common.h"
27 #include "qemu/cutils.h"
28 #include "migration/vmstate.h"
29 #include "qapi/error.h"
30 #include "qemu/error-report.h"
31 #include "exec/exec-all.h"
32 #include "sysemu/cpus.h"
33 #include "sysemu/qtest.h"
34 #include "qemu/main-loop.h"
35 #include "qemu/option.h"
36 #include "qemu/seqlock.h"
37 #include "sysemu/replay.h"
38 #include "sysemu/runstate.h"
39 #include "hw/core/cpu.h"
40 #include "sysemu/cpu-timers.h"
41 #include "sysemu/cpu-throttle.h"
42 #include "timers-state.h"
46 static int64_t cpu_get_ticks_locked(void)
48 int64_t ticks
= timers_state
.cpu_ticks_offset
;
49 if (timers_state
.cpu_ticks_enabled
) {
50 ticks
+= cpu_get_host_ticks();
53 if (timers_state
.cpu_ticks_prev
> ticks
) {
54 /* Non increasing ticks may happen if the host uses software suspend. */
55 timers_state
.cpu_ticks_offset
+= timers_state
.cpu_ticks_prev
- ticks
;
56 ticks
= timers_state
.cpu_ticks_prev
;
59 timers_state
.cpu_ticks_prev
= ticks
;
64 * return the time elapsed in VM between vm_start and vm_stop. Unless
65 * icount is active, cpu_get_ticks() uses units of the host CPU cycle
68 int64_t cpu_get_ticks(void)
72 if (icount_enabled()) {
73 return cpu_get_icount();
76 qemu_spin_lock(&timers_state
.vm_clock_lock
);
77 ticks
= cpu_get_ticks_locked();
78 qemu_spin_unlock(&timers_state
.vm_clock_lock
);
82 int64_t cpu_get_clock_locked(void)
86 time
= timers_state
.cpu_clock_offset
;
87 if (timers_state
.cpu_ticks_enabled
) {
95 * Return the monotonic time elapsed in VM, i.e.,
96 * the time between vm_start and vm_stop
98 int64_t cpu_get_clock(void)
104 start
= seqlock_read_begin(&timers_state
.vm_clock_seqlock
);
105 ti
= cpu_get_clock_locked();
106 } while (seqlock_read_retry(&timers_state
.vm_clock_seqlock
, start
));
112 * enable cpu_get_ticks()
113 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
115 void cpu_enable_ticks(void)
117 seqlock_write_lock(&timers_state
.vm_clock_seqlock
,
118 &timers_state
.vm_clock_lock
);
119 if (!timers_state
.cpu_ticks_enabled
) {
120 timers_state
.cpu_ticks_offset
-= cpu_get_host_ticks();
121 timers_state
.cpu_clock_offset
-= get_clock();
122 timers_state
.cpu_ticks_enabled
= 1;
124 seqlock_write_unlock(&timers_state
.vm_clock_seqlock
,
125 &timers_state
.vm_clock_lock
);
129 * disable cpu_get_ticks() : the clock is stopped. You must not call
130 * cpu_get_ticks() after that.
131 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
133 void cpu_disable_ticks(void)
135 seqlock_write_lock(&timers_state
.vm_clock_seqlock
,
136 &timers_state
.vm_clock_lock
);
137 if (timers_state
.cpu_ticks_enabled
) {
138 timers_state
.cpu_ticks_offset
+= cpu_get_host_ticks();
139 timers_state
.cpu_clock_offset
= cpu_get_clock_locked();
140 timers_state
.cpu_ticks_enabled
= 0;
142 seqlock_write_unlock(&timers_state
.vm_clock_seqlock
,
143 &timers_state
.vm_clock_lock
);
146 static bool icount_state_needed(void *opaque
)
148 return icount_enabled();
151 static bool warp_timer_state_needed(void *opaque
)
153 TimersState
*s
= opaque
;
154 return s
->icount_warp_timer
!= NULL
;
157 static bool adjust_timers_state_needed(void *opaque
)
159 TimersState
*s
= opaque
;
160 return s
->icount_rt_timer
!= NULL
;
163 static bool shift_state_needed(void *opaque
)
165 return icount_enabled() == 2;
169 * Subsection for warp timer migration is optional, because may not be created
171 static const VMStateDescription icount_vmstate_warp_timer
= {
172 .name
= "timer/icount/warp_timer",
174 .minimum_version_id
= 1,
175 .needed
= warp_timer_state_needed
,
176 .fields
= (VMStateField
[]) {
177 VMSTATE_INT64(vm_clock_warp_start
, TimersState
),
178 VMSTATE_TIMER_PTR(icount_warp_timer
, TimersState
),
179 VMSTATE_END_OF_LIST()
183 static const VMStateDescription icount_vmstate_adjust_timers
= {
184 .name
= "timer/icount/timers",
186 .minimum_version_id
= 1,
187 .needed
= adjust_timers_state_needed
,
188 .fields
= (VMStateField
[]) {
189 VMSTATE_TIMER_PTR(icount_rt_timer
, TimersState
),
190 VMSTATE_TIMER_PTR(icount_vm_timer
, TimersState
),
191 VMSTATE_END_OF_LIST()
195 static const VMStateDescription icount_vmstate_shift
= {
196 .name
= "timer/icount/shift",
198 .minimum_version_id
= 1,
199 .needed
= shift_state_needed
,
200 .fields
= (VMStateField
[]) {
201 VMSTATE_INT16(icount_time_shift
, TimersState
),
202 VMSTATE_END_OF_LIST()
207 * This is a subsection for icount migration.
209 static const VMStateDescription icount_vmstate_timers
= {
210 .name
= "timer/icount",
212 .minimum_version_id
= 1,
213 .needed
= icount_state_needed
,
214 .fields
= (VMStateField
[]) {
215 VMSTATE_INT64(qemu_icount_bias
, TimersState
),
216 VMSTATE_INT64(qemu_icount
, TimersState
),
217 VMSTATE_END_OF_LIST()
219 .subsections
= (const VMStateDescription
* []) {
220 &icount_vmstate_warp_timer
,
221 &icount_vmstate_adjust_timers
,
222 &icount_vmstate_shift
,
227 static const VMStateDescription vmstate_timers
= {
230 .minimum_version_id
= 1,
231 .fields
= (VMStateField
[]) {
232 VMSTATE_INT64(cpu_ticks_offset
, TimersState
),
234 VMSTATE_INT64_V(cpu_clock_offset
, TimersState
, 2),
235 VMSTATE_END_OF_LIST()
237 .subsections
= (const VMStateDescription
* []) {
238 &icount_vmstate_timers
,
243 static void do_nothing(CPUState
*cpu
, run_on_cpu_data unused
)
247 void qemu_timer_notify_cb(void *opaque
, QEMUClockType type
)
249 if (!icount_enabled() || type
!= QEMU_CLOCK_VIRTUAL
) {
254 if (qemu_in_vcpu_thread()) {
256 * A CPU is currently running; kick it back out to the
257 * tcg_cpu_exec() loop so it will recalculate its
258 * icount deadline immediately.
260 qemu_cpu_kick(current_cpu
);
261 } else if (first_cpu
) {
263 * qemu_cpu_kick is not enough to kick a halted CPU out of
264 * qemu_tcg_wait_io_event. async_run_on_cpu, instead,
265 * causes cpu_thread_is_idle to return false. This way,
266 * handle_icount_deadline can run.
267 * If we have no CPUs at all for some reason, we don't
268 * need to do anything.
270 async_run_on_cpu(first_cpu
, do_nothing
, RUN_ON_CPU_NULL
);
274 TimersState timers_state
;
276 /* initialize timers state and the cpu throttle for convenience */
277 void cpu_timers_init(void)
279 seqlock_init(&timers_state
.vm_clock_seqlock
);
280 qemu_spin_init(&timers_state
.vm_clock_lock
);
281 vmstate_register(NULL
, 0, &vmstate_timers
, &timers_state
);