/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"

static struct timecounter *timecounter;
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;

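/*
 * Putting the vcpu means the hardware cannot be trusted to still hold
 * our cached "active clear" state on the next vcpu load, so invalidate
 * it here (see the caching discussion in kvm_timer_flush_hwstate).
 */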
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->arch.timer_cpu.active_cleared_last = false;
}

static cycle_t kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

static bool timer_is_armed(struct arch_timer_cpu *timer)
{
	return timer->armed;
}

/* timer_arm: as in "arm the timer", not as in ARM the company */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
	timer->armed = true;
	hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS);
}

static void timer_disarm(struct arch_timer_cpu *timer)
{
	if (timer_is_armed(timer)) {
		hrtimer_cancel(&timer->timer);
		cancel_work_sync(&timer->expired);
		timer->armed = false;
	}
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

	/*
	 * We disable the timer in the world switch and let it be
	 * handled by kvm_timer_sync_hwstate(). Getting a timer
	 * interrupt at this point is a sure sign of some major
	 * breakage.
	 */
	pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
	return IRQ_HANDLED;
}

/*
 * Work function for handling the backup timer that we schedule when a vcpu is
 * no longer running, but had a timer programmed to fire in the future.
 */
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
	vcpu->arch.timer_cpu.armed = false;

	WARN_ON(!kvm_timer_should_fire(vcpu));

	/*
	 * If the vcpu is blocked we want to wake it up so that it will see
	 * the timer has expired when entering the guest.
	 */
	kvm_vcpu_kick(vcpu);
}

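/*
 * Compute the number of nanoseconds until the guest's virtual timer
 * fires, based on CNTV_CVAL and the per-VM counter offset; returns 0
 * if the compare value has already been reached.
 */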
static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
{
	cycle_t cval, now;

	cval = vcpu->arch.timer_cpu.cntv_cval;
	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

	if (now < cval) {
		u64 ns;

		ns = cyclecounter_cyc2ns(timecounter->cc,
					 cval - now,
					 timecounter->mask,
					 &timecounter->frac);
		return ns;
	}

	return 0;
}

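/*
 * hrtimer callback for the background timer armed by kvm_timer_schedule():
 * either restart the timer if it fired too early, or queue the expiry work.
 */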
static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_compute_delta(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	queue_work(wqueue, &timer->expired);
	return HRTIMER_NORESTART;
}

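/*
 * The timer output can only be asserted when the timer is enabled and
 * its interrupt is not masked in CNTV_CTL.
 */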
static bool kvm_timer_irq_can_fire(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	return !(timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
		(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE);
}

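/*
 * Determine whether the timer should be asserting its interrupt line:
 * true when the timer can fire and CNTV_CVAL is at or behind the
 * guest's view of the counter.
 */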
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	cycle_t cval, now;

	if (!kvm_timer_irq_can_fire(vcpu))
		return false;

	cval = timer->cntv_cval;
	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

	return cval <= now;
}

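/*
 * Move the virtual interrupt line to a new level and forward the new
 * state to the vgic, which injects it as a mapped (HW) interrupt.
 */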
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
{
	int ret;
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	BUG_ON(!vgic_initialized(vcpu->kvm));

	timer->active_cleared_last = false;
	timer->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->irq.irq,
				   timer->irq.level);
	ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
					 timer->irq.irq,
					 timer->irq.level);
	WARN_ON(ret);
}

/*
 * Check if there was a change in the timer state (should we raise or lower
 * the line level to the GIC).
 */
static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	/*
	 * If userspace modified the timer registers via SET_ONE_REG before
	 * the vgic was initialized, we mustn't set the timer->irq.level value
	 * because the guest would never see the interrupt. Instead wait
	 * until we call this function from kvm_timer_flush_hwstate.
	 */
	if (!vgic_initialized(vcpu->kvm))
		return -ENODEV;

	if (kvm_timer_should_fire(vcpu) != timer->irq.level)
		kvm_timer_update_irq(vcpu, !timer->irq.level);

	return 0;
}

/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	BUG_ON(timer_is_armed(timer));

	/*
	 * No need to schedule a background timer if the guest timer has
	 * already expired, because kvm_vcpu_block will return before putting
	 * the thread to sleep.
	 */
	if (kvm_timer_should_fire(vcpu))
		return;

	/*
	 * If the timer is not capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(vcpu))
		return;

	/* The timer has not yet expired, schedule a background timer */
	timer_arm(timer, kvm_timer_compute_delta(vcpu));
}

void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	timer_disarm(timer);
}

/**
 * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer has expired while we were running in the host,
 * and inject an interrupt if that was the case.
 */
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	bool phys_active;
	int ret;

	if (kvm_timer_update_state(vcpu))
		return;

	/*
	 * If we enter the guest with the virtual input level to the VGIC
	 * asserted, then we have already told the VGIC what we need to, and
	 * we don't need to exit from the guest until the guest deactivates
	 * the already injected interrupt, so we should set the hardware
	 * active state to prevent unnecessary exits from the guest.
	 *
	 * Also, if we enter the guest with the virtual timer interrupt active,
	 * then it must be active on the physical distributor, because we set
	 * the HW bit and the guest must be able to deactivate the virtual and
	 * physical interrupt at the same time.
	 *
	 * Conversely, if the virtual input level is deasserted and the virtual
	 * interrupt is not active, then always clear the hardware active state
	 * to ensure that hardware interrupts from the timer trigger a guest
	 * exit.
	 */
	phys_active = timer->irq.level ||
			kvm_vgic_map_is_active(vcpu, timer->irq.irq);

	/*
	 * We want to avoid hitting the (re)distributor as much as
	 * possible, as this is a potentially expensive MMIO access
	 * (not to mention locks in the irq layer), and a solution for
	 * this is to cache the "active" state in memory.
	 *
	 * Things to consider: we cannot cache an "active set" state,
	 * because the HW can change this behind our back (it becomes
	 * "clear" in the HW). We must then restrict the caching to
	 * the "clear" state.
	 *
	 * The cache is invalidated on:
	 * - vcpu put, indicating that the HW cannot be trusted to be
	 *   in a sane state on the next vcpu load,
	 * - any change in the interrupt state
	 *
	 * Usage conditions:
	 * - cached value is "active clear"
	 * - value to be programmed is "active clear"
	 */
	if (timer->active_cleared_last && !phys_active)
		return;

	ret = irq_set_irqchip_state(host_vtimer_irq,
				    IRQCHIP_STATE_ACTIVE,
				    phys_active);
	WARN_ON(ret);

	timer->active_cleared_last = !phys_active;
}

/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer has expired while we were running in the guest,
 * and inject an interrupt if that was the case.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	BUG_ON(timer_is_armed(timer));

	/*
	 * The guest could have modified the timer registers or the timer
	 * could have expired, update the timer state.
	 */
	kvm_timer_update_state(vcpu);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
			 const struct kvm_irq_level *irq)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	struct irq_desc *desc;
	struct irq_data *data;
	int phys_irq;

	/*
	 * The vcpu timer irq number cannot be determined in
	 * kvm_timer_vcpu_init() because it is called long before
	 * kvm_vcpu_set_target(). To handle this, we determine the vcpu
	 * timer irq number when the vcpu is reset.
	 */
	timer->irq.irq = irq->irq;

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7. We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	timer->cntv_ctl = 0;
	kvm_timer_update_state(vcpu);

	/*
	 * Find the physical IRQ number corresponding to the host_vtimer_irq
	 */
	desc = irq_to_desc(host_vtimer_irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return -EINVAL;
	}

	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	phys_irq = data->hwirq;

	/*
	 * Tell the VGIC that the virtual interrupt is tied to a
	 * physical interrupt. We do that once per VCPU.
	 */
	return kvm_vgic_map_phys_irq(vcpu, irq->irq, phys_irq);
}

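/*
 * Set up the per-vcpu pieces of the background timer: the work item
 * that kicks the vcpu on expiry and the hrtimer that queues it.
 */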
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
	hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->timer.function = kvm_timer_expire;
}

static void kvm_timer_init_interrupt(void *info)
{
	enable_percpu_irq(host_vtimer_irq, 0);
}

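/*
 * Userspace register accessors (the KVM_{GET,SET}_ONE_REG ioctls).
 * Note that writes to the virtual count are implemented by adjusting
 * the per-VM counter offset rather than any hardware state.
 */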
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer->cntv_ctl = value;
		break;
	case KVM_REG_ARM_TIMER_CNT:
		vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer->cntv_cval = value;
		break;
	default:
		return -1;
	}

	kvm_timer_update_state(vcpu);
	return 0;
}

u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return timer->cntv_ctl;
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
	case KVM_REG_ARM_TIMER_CVAL:
		return timer->cntv_cval;
	}
	return (u64)-1;
}

static int kvm_timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		kvm_timer_init_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(host_vtimer_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block kvm_timer_cpu_nb = {
	.notifier_call = kvm_timer_cpu_notify,
};

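/*
 * One-time host initialization: fetch the timecounter and virtual timer
 * IRQ from the arch timer driver, request the percpu IRQ, and register
 * the CPU notifier and the expiry workqueue.
 */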
int kvm_timer_hyp_init(void)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}
	host_vtimer_irq = info->virtual_irq;

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest timer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
			host_vtimer_irq, err);
		goto out;
	}

	err = __register_cpu_notifier(&kvm_timer_cpu_nb);
	if (err) {
		kvm_err("Cannot register timer CPU notifier\n");
		goto out_free;
	}

	wqueue = create_singlethread_workqueue("kvm_arch_timer");
	if (!wqueue) {
		err = -ENOMEM;
		goto out_free;
	}

	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);

	goto out;
out_free:
	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
out:
	return err;
}

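/*
 * Tear down the per-vcpu timer state: cancel any pending background
 * timer work and remove the virtual/physical interrupt mapping.
 */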
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	timer_disarm(timer);
	kvm_vgic_unmap_phys_irq(vcpu, timer->irq.irq);
}

void kvm_timer_enable(struct kvm *kvm)
{
	if (kvm->arch.timer.enabled)
		return;

	/*
	 * There is a potential race here between VCPUs starting for the first
	 * time, which may be enabling the timer multiple times. That doesn't
	 * hurt though, because we're just setting a variable to the same
	 * value it already had. The important thing is that all VCPUs have
	 * the enabled variable set, before entering the guest, if the arch
	 * timers are enabled.
	 */
	if (timecounter && wqueue)
		kvm->arch.timer.enabled = 1;
}

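/*
 * Snapshot the physical counter at VM creation so that the guest's
 * virtual counter (physical count minus CNTVOFF) starts at zero.
 */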
void kvm_timer_init(struct kvm *kvm)
{
	kvm->arch.timer.cntvoff = kvm_phys_timer_read();
}