/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/uaccess.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
#include <asm/kvm_hyp.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

#include "trace.h"	/* for trace_kvm_timer_update_irq() */
static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static u32 host_vtimer_irq_flags;

static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
static const struct kvm_irq_level default_ptimer_irq = {
        .irq = 30, .level = 1,	/* PPI 30: EL1 physical timer */
};

static const struct kvm_irq_level default_vtimer_irq = {
        .irq = 27, .level = 1,	/* PPI 27: EL1 virtual timer */
};
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
                                 struct arch_timer_context *timer_ctx);
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
u64 kvm_phys_timer_read(void)
{
        return timecounter->cc->read(timecounter->cc);
}
static inline bool userspace_irqchip(struct kvm *kvm)
{
        return static_branch_unlikely(&userspace_irqchip_in_use) &&
                unlikely(!irqchip_in_kernel(kvm));
}
static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
        hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
                      HRTIMER_MODE_ABS);
}
static void soft_timer_cancel(struct hrtimer *hrt)
{
        hrtimer_cancel(hrt);
}
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
        struct arch_timer_context *vtimer;

        /*
         * We may see a timer interrupt after vcpu_put() has been called which
         * sets the CPU's vcpu pointer to NULL, because even though the timer
         * has been disabled in vtimer_save_state(), the hardware interrupt
         * signal may not have been retired from the interrupt controller yet.
         */
        if (!vcpu)
                return IRQ_HANDLED;

        vtimer = vcpu_vtimer(vcpu);
        if (kvm_timer_should_fire(vtimer))
                kvm_timer_update_irq(vcpu, true, vtimer);

        if (userspace_irqchip(vcpu->kvm) &&
            !static_branch_unlikely(&has_gic_active_state))
                disable_percpu_irq(host_vtimer_irq);

        return IRQ_HANDLED;
}
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
        u64 cval, now;

        cval = timer_ctx->cnt_cval;
        now = kvm_phys_timer_read() - timer_ctx->cntvoff;

        if (now < cval) {
                u64 ns;

                ns = cyclecounter_cyc2ns(timecounter->cc,
                                         cval - now,
                                         timecounter->mask,
                                         &timecounter->frac);
                return ns;
        }

        return 0;
}
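/*
 * Worked example (illustrative numbers, not from the original source): with
 * a 50 MHz system counter, a guest CVAL 1,000,000 cycles in the future is
 * 1,000,000 / 50e6 s = 20 ms, so the function above would return roughly
 * 20,000,000 ns. cyclecounter_cyc2ns() performs this scaling with the
 * cyclecounter's mult/shift pair instead of a division.
 */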
static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
{
        return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
                (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
}
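/*
 * For reference (architectural facts, not part of the original file): the
 * CNT{V,P}_CTL layout is bit 0 ENABLE, bit 1 IMASK, bit 2 ISTATUS, matching
 * ARCH_TIMER_CTRL_ENABLE, ARCH_TIMER_CTRL_IT_MASK and ARCH_TIMER_CTRL_IT_STAT.
 * A timer can therefore only fire when ENABLE == 1 and IMASK == 0, which is
 * exactly the test above.
 */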
/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of the timers can fire.
 */
static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
{
        u64 min_virt = ULLONG_MAX, min_phys = ULLONG_MAX;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        if (kvm_timer_irq_can_fire(vtimer))
                min_virt = kvm_timer_compute_delta(vtimer);

        if (kvm_timer_irq_can_fire(ptimer))
                min_phys = kvm_timer_compute_delta(ptimer);

        /* If neither timer can fire, return 0 */
        if ((min_virt == ULLONG_MAX) && (min_phys == ULLONG_MAX))
                return 0;

        return min(min_virt, min_phys);
}
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_cpu *timer;
        struct kvm_vcpu *vcpu;
        u64 ns;

        timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
        vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

        /*
         * Check that the timer has really expired from the guest's
         * PoV (NTP on the host may have forced it to expire
         * early). If we should have slept longer, restart it.
         */
        ns = kvm_timer_earliest_exp(vcpu);
        if (unlikely(ns)) {
                hrtimer_forward_now(hrt, ns_to_ktime(ns));
                return HRTIMER_RESTART;
        }

        kvm_vcpu_wake_up(vcpu);
        return HRTIMER_NORESTART;
}
static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt)
{
        struct arch_timer_context *ptimer;
        struct arch_timer_cpu *timer;
        struct kvm_vcpu *vcpu;
        u64 ns;

        timer = container_of(hrt, struct arch_timer_cpu, phys_timer);
        vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
        ptimer = vcpu_ptimer(vcpu);

        /*
         * Check that the timer has really expired from the guest's
         * PoV (NTP on the host may have forced it to expire
         * early). If not ready, schedule for a later time.
         */
        ns = kvm_timer_compute_delta(ptimer);
        if (unlikely(ns)) {
                hrtimer_forward_now(hrt, ns_to_ktime(ns));
                return HRTIMER_RESTART;
        }

        kvm_timer_update_irq(vcpu, true, ptimer);
        return HRTIMER_NORESTART;
}
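/*
 * Note on the hrtimer protocol used by both expiry callbacks above:
 * hrtimer_forward_now() pushes the timer's expiry the given interval past
 * the current time, and returning HRTIMER_RESTART re-arms it; returning
 * HRTIMER_NORESTART lets the timer die until it is explicitly started again.
 */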
static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
        u64 cval, now;

        if (timer_ctx->loaded) {
                u32 cnt_ctl;

                /* Only the virtual timer can be loaded so far */
                cnt_ctl = read_sysreg_el0(cntv_ctl);
                return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
                        (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
                       !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
        }

        if (!kvm_timer_irq_can_fire(timer_ctx))
                return false;

        cval = timer_ctx->cnt_cval;
        now = kvm_phys_timer_read() - timer_ctx->cntvoff;

        return cval <= now;
}
bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        if (kvm_timer_should_fire(vtimer))
                return true;

        return kvm_timer_should_fire(ptimer);
}
/*
 * Reflect the timer output level into the kvm_run structure
 */
void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the device bitmap with the timer states */
        regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
                                    KVM_ARM_DEV_EL1_PTIMER);
        if (kvm_timer_should_fire(vtimer))
                regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
        if (kvm_timer_should_fire(ptimer))
                regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}
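/*
 * A minimal sketch of the consuming side (hypothetical VMM code, not part of
 * this file): after KVM_RUN returns, a VMM emulating the irqchip would do
 * something like
 *
 *	__u64 level = run->s.regs.device_irq_level;
 *	set_gic_ppi(27, level & KVM_ARM_DEV_EL1_VTIMER);
 *	set_gic_ppi(30, level & KVM_ARM_DEV_EL1_PTIMER);
 *
 * where set_gic_ppi() is an assumed helper in the VMM's emulated GIC and the
 * PPI numbers match the default timer IRQs above.
 */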
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
                                 struct arch_timer_context *timer_ctx)
{
        int ret;

        timer_ctx->irq.level = new_level;
        trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
                                   timer_ctx->irq.level);

        if (!userspace_irqchip(vcpu->kvm)) {
                ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                          timer_ctx->irq.irq,
                                          timer_ctx->irq.level,
                                          timer_ctx);
                WARN_ON(ret);
        }
}
/* Schedule the background timer for the emulated timer. */
static void phys_timer_emulate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        /*
         * If the timer can fire now, we don't need to have a soft timer
         * scheduled for the future. If the timer cannot fire at all,
         * then we also don't need a soft timer.
         */
        if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) {
                soft_timer_cancel(&timer->phys_timer);
                return;
        }

        soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(ptimer));
}
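/*
 * Summarizing the three cases handled above: already pending -> the line is
 * raised by kvm_timer_update_state(), no hrtimer needed; disabled or masked
 * -> it can never fire, no hrtimer needed; otherwise -> program the
 * phys_timer hrtimer for the remaining delta.
 */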
/*
 * Check if there was a change in the timer state, so that we should either
 * raise or lower the line level to the GIC or schedule a background timer to
 * emulate the physical timer.
 */
static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        bool level;

        if (unlikely(!timer->enabled))
                return;

        /*
         * The vtimer virtual interrupt is a 'mapped' interrupt, meaning part
         * of its lifecycle is offloaded to the hardware, and we therefore may
         * not have lowered the irq.level value before having to signal a new
         * interrupt, but have to signal an interrupt every time the level is
         * asserted.
         */
        level = kvm_timer_should_fire(vtimer);
        kvm_timer_update_irq(vcpu, level, vtimer);

        phys_timer_emulate(vcpu);

        if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
                kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
}
static void vtimer_save_state(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        unsigned long flags;

        local_irq_save(flags);

        if (!vtimer->loaded)
                goto out;

        if (timer->enabled) {
                vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
                vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
        }

        /* Disable the virtual timer */
        write_sysreg_el0(0, cntv_ctl);
        isb();

        vtimer->loaded = false;
out:
        local_irq_restore(flags);
}
/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        vtimer_save_state(vcpu);

        /*
         * No need to schedule a background timer if any guest timer has
         * already expired, because kvm_vcpu_block will return before putting
         * the thread to sleep.
         */
        if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
                return;

        /*
         * If both timers are not capable of raising interrupts (disabled or
         * masked), then there's no more work for us to do.
         */
        if (!kvm_timer_irq_can_fire(vtimer) && !kvm_timer_irq_can_fire(ptimer))
                return;

        /*
         * The guest timers have not yet expired, schedule a background timer.
         * Set the earliest expiration time among the guest timers.
         */
        soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}
static void vtimer_restore_state(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        unsigned long flags;

        local_irq_save(flags);

        if (vtimer->loaded)
                goto out;

        if (timer->enabled) {
                write_sysreg_el0(vtimer->cnt_cval, cntv_cval);
                isb();
                write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl);
        }

        vtimer->loaded = true;
out:
        local_irq_restore(flags);
}
void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        vtimer_restore_state(vcpu);

        soft_timer_cancel(&timer->bg_timer);
}
static void set_cntvoff(u64 cntvoff)
{
        u32 low = lower_32_bits(cntvoff);
        u32 high = upper_32_bits(cntvoff);

        /*
         * Since kvm_call_hyp doesn't fully support the ARM PCS especially on
         * 32-bit systems, but rather passes register by register shifted one
         * place (we put the function address in r0/x0), we cannot simply pass
         * a 64-bit value as an argument, but have to split the value in two
         * 32-bit halves.
         */
        kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
}
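/*
 * Illustrative split (values invented for the example): for
 * cntvoff == 0x0000000123456789, lower_32_bits() yields 0x23456789 and
 * upper_32_bits() yields 0x00000001; the hyp side reassembles
 * ((u64)high << 32) | low before writing CNTVOFF_EL2.
 */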
static inline void set_vtimer_irq_phys_active(struct kvm_vcpu *vcpu, bool active)
{
        int r;
        r = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, active);
        WARN_ON(r);
}
static void kvm_timer_vcpu_load_gic(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        bool phys_active;

        if (irqchip_in_kernel(vcpu->kvm))
                phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
        else
                phys_active = vtimer->irq.level;
        set_vtimer_irq_phys_active(vcpu, phys_active);
}
static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        /*
         * When using a userspace irqchip with the architected timers and a
         * host interrupt controller that doesn't support an active state, we
         * must still prevent continuously exiting from the guest, and
         * therefore mask the physical interrupt by disabling it on the host
         * interrupt controller when the virtual level is high, such that the
         * guest can make forward progress. Once we detect the output level
         * being de-asserted, we unmask the interrupt again so that we exit
         * from the guest when the timer fires.
         */
        if (vtimer->irq.level)
                disable_percpu_irq(host_vtimer_irq);
        else
                enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        if (unlikely(!timer->enabled))
                return;

        if (static_branch_likely(&has_gic_active_state))
                kvm_timer_vcpu_load_gic(vcpu);
        else
                kvm_timer_vcpu_load_nogic(vcpu);

        set_cntvoff(vtimer->cntvoff);

        vtimer_restore_state(vcpu);

        /* Set the background timer for the physical timer emulation. */
        phys_timer_emulate(vcpu);

        /* If the timer fired while we weren't running, inject it now */
        if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
                kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
}
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool vlevel, plevel;

        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
        plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

        return kvm_timer_should_fire(vtimer) != vlevel ||
               kvm_timer_should_fire(ptimer) != plevel;
}
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        if (unlikely(!timer->enabled))
                return;

        vtimer_save_state(vcpu);

        /*
         * Cancel the physical timer emulation, because the only case where we
         * need it after a vcpu_put is in the context of a sleeping VCPU, and
         * in that case we already factor in the deadline for the physical
         * timer when scheduling the bg_timer.
         *
         * In any case, we re-schedule the hrtimer for the physical timer when
         * coming back to the VCPU thread in kvm_timer_vcpu_load().
         */
        soft_timer_cancel(&timer->phys_timer);

        /*
         * The kernel may decide to run userspace after calling vcpu_put, so
         * we reset cntvoff to 0 to ensure a consistent read between user
         * accesses to the virtual counter and kernel accesses to the physical
         * counter in the non-VHE case. For VHE, the virtual counter uses a
         * fixed virtual offset of zero, so no need to zero CNTVOFF_EL2.
         */
        if (!has_vhe())
                set_cntvoff(0);
}
/*
 * With a userspace irqchip we have to check if the guest de-asserted the
 * timer and if so, unmask the timer irq signal on the host interrupt
 * controller to ensure that we see future timer signals.
 */
static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        if (!kvm_timer_should_fire(vtimer)) {
                kvm_timer_update_irq(vcpu, false, vtimer);
                if (static_branch_likely(&has_gic_active_state))
                        set_vtimer_irq_phys_active(vcpu, false);
                else
                        enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
        }
}
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        if (unlikely(!timer->enabled))
                return;

        if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
                unmask_vtimer_irq_user(vcpu);
}
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        /*
         * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
         * and to 0 for ARMv7. We provide an implementation that always
         * resets the timer to be disabled and unmasked and is compliant with
         * the ARMv7 architecture.
         */
        vtimer->cnt_ctl = 0;
        ptimer->cnt_ctl = 0;
        kvm_timer_update_state(vcpu);

        if (timer->enabled && irqchip_in_kernel(vcpu->kvm))
                kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq);

        return 0;
}
/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
        int i;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tmp;

        mutex_lock(&kvm->lock);
        kvm_for_each_vcpu(i, tmp, kvm)
                vcpu_vtimer(tmp)->cntvoff = cntvoff;

        /*
         * When called from the vcpu create path, the CPU being created is not
         * included in the loop above, so we just set it here as well.
         */
        vcpu_vtimer(vcpu)->cntvoff = cntvoff;
        mutex_unlock(&kvm->lock);
}
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        /* Synchronize cntvoff across all vtimers of a VM. */
        update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
        vcpu_ptimer(vcpu)->cntvoff = 0;

        hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->bg_timer.function = kvm_bg_timer_expire;

        hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        timer->phys_timer.function = kvm_phys_timer_expire;

        vtimer->irq.irq = default_vtimer_irq.irq;
        ptimer->irq.irq = default_ptimer_irq.irq;
}
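/*
 * Using the current physical count as the initial cntvoff means a freshly
 * created VM sees its virtual counter start at (roughly) zero, since
 * architecturally CNTVCT = CNTPCT - CNTVOFF. The ptimer, by contrast, keeps
 * a zero offset and exposes the host's physical counter directly.
 */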
static void kvm_timer_init_interrupt(void *info)
{
        enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                vtimer->cnt_ctl = value & ~ARCH_TIMER_CTRL_IT_STAT;
                break;
        case KVM_REG_ARM_TIMER_CNT:
                update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                vtimer->cnt_cval = value;
                break;
        case KVM_REG_ARM_PTIMER_CTL:
                ptimer->cnt_ctl = value & ~ARCH_TIMER_CTRL_IT_STAT;
                break;
        case KVM_REG_ARM_PTIMER_CVAL:
                ptimer->cnt_cval = value;
                break;
        default:
                return -1;
        }

        kvm_timer_update_state(vcpu);
        return 0;
}
static u64 read_timer_ctl(struct arch_timer_context *timer)
{
        /*
         * Set ISTATUS bit if it's expired.
         * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
         * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
         * regardless of ENABLE bit for our implementation convenience.
         */
        if (!kvm_timer_compute_delta(timer))
                return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT;

        return timer->cnt_ctl;
}
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                return read_timer_ctl(vtimer);
        case KVM_REG_ARM_TIMER_CNT:
                return kvm_phys_timer_read() - vtimer->cntvoff;
        case KVM_REG_ARM_TIMER_CVAL:
                return vtimer->cnt_cval;
        case KVM_REG_ARM_PTIMER_CTL:
                return read_timer_ctl(ptimer);
        case KVM_REG_ARM_PTIMER_CVAL:
                return ptimer->cnt_cval;
        case KVM_REG_ARM_PTIMER_CNT:
                return kvm_phys_timer_read();
        }
        return (u64)-1;
}
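/*
 * A hypothetical userspace read of the virtual counter through this interface
 * (sketch, not from this file) would look like:
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_ARM_TIMER_CNT,
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * which lands in the KVM_REG_ARM_TIMER_CNT case above.
 */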
static int kvm_timer_starting_cpu(unsigned int cpu)
{
        kvm_timer_init_interrupt(NULL);
        return 0;
}

static int kvm_timer_dying_cpu(unsigned int cpu)
{
        disable_percpu_irq(host_vtimer_irq);
        return 0;
}
int kvm_timer_hyp_init(bool has_gic)
{
        struct arch_timer_kvm_info *info;
        int err;

        info = arch_timer_get_kvm_info();
        timecounter = &info->timecounter;

        if (!timecounter->cc) {
                kvm_err("kvm_arch_timer: uninitialized timecounter\n");
                return -ENODEV;
        }

        if (info->virtual_irq <= 0) {
                kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
                        info->virtual_irq);
                return -ENODEV;
        }
        host_vtimer_irq = info->virtual_irq;

        host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
        if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
            host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
                kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
                        host_vtimer_irq);
                host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
        }

        err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
                                 "kvm guest timer", kvm_get_running_vcpus());
        if (err) {
                kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
                        host_vtimer_irq, err);
                return err;
        }

        if (has_gic) {
                err = irq_set_vcpu_affinity(host_vtimer_irq,
                                            kvm_get_running_vcpus());
                if (err) {
                        kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
                        goto out_free_irq;
                }

                static_branch_enable(&has_gic_active_state);
        }

        kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);

        cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
                          "kvm/arm/timer:starting", kvm_timer_starting_cpu,
                          kvm_timer_dying_cpu);
        return 0;

out_free_irq:
        free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
        return err;
}
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        soft_timer_cancel(&timer->bg_timer);
}
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
        int vtimer_irq, ptimer_irq;
        int i, ret;

        vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
        ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
        if (ret)
                return false;

        ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
        ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
        if (ret)
                return false;

        kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
                if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
                    vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
                        return false;
        }

        return true;
}
bool kvm_arch_timer_get_input_level(int vintid)
{
        struct kvm_vcpu *vcpu = kvm_arm_get_running_vcpu();
        struct arch_timer_context *timer;

        if (vintid == vcpu_vtimer(vcpu)->irq.irq)
                timer = vcpu_vtimer(vcpu);
        else
                BUG(); /* We only map the vtimer so far */

        return kvm_timer_should_fire(timer);
}
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        int ret;

        if (timer->enabled)
                return 0;

        /* Without a VGIC we do not map virtual IRQs to physical IRQs */
        if (!irqchip_in_kernel(vcpu->kvm))
                goto no_vgic;

        if (!vgic_initialized(vcpu->kvm))
                return -ENODEV;

        if (!timer_irqs_are_valid(vcpu)) {
                kvm_debug("incorrectly configured timer irqs\n");
                return -EINVAL;
        }

        ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq,
                                    kvm_arch_timer_get_input_level);
        if (ret)
                return ret;

no_vgic:
        timer->enabled = 1;
        return 0;
}
/*
 * On a VHE system, we only need to configure the trap on physical timer and
 * counter accesses in EL0 and EL1 once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
 * and this makes those bits have no effect for the host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
        /* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
        u32 cnthctl_shift = 10;
        u64 val;

        /*
         * Disallow physical timer access for the guest.
         * Physical counter access is allowed.
         */
        val = read_sysreg(cnthctl_el2);
        val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
        val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
        write_sysreg(val, cnthctl_el2);
}
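/*
 * For reference (architectural facts, not in the original file): with
 * HCR_EL2.E2H == 1, CNTHCTL_EL2.EL1PCTEN moves from bit 0 to bit 10 and
 * EL1PCEN from bit 1 to bit 11, hence the shift by 10 above. The resulting
 * configuration traps guest physical timer accesses (EL1PCEN clear) while
 * leaving physical counter reads untrapped (EL1PCTEN set).
 */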
static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
        struct kvm_vcpu *vcpu;
        int i;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
                vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
        }
}
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        int __user *uaddr = (int __user *)(long)attr->addr;
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        int irq;

        if (!irqchip_in_kernel(vcpu->kvm))
                return -EINVAL;

        if (get_user(irq, uaddr))
                return -EFAULT;

        if (!(irq_is_ppi(irq)))
                return -EINVAL;

        if (vcpu->arch.timer_cpu.enabled)
                return -EBUSY;

        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
                set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
                break;
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
                set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
                break;
        default:
                return -ENXIO;
        }

        return 0;
}
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        int __user *uaddr = (int __user *)(long)attr->addr;
        struct arch_timer_context *timer;
        int irq;

        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
                timer = vcpu_vtimer(vcpu);
                break;
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
                timer = vcpu_ptimer(vcpu);
                break;
        default:
                return -ENXIO;
        }

        irq = timer->irq.irq;
        return put_user(irq, uaddr);
}
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
        case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
                return 0;
        }

        return -ENXIO;
}