/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
19 #include <linux/cpu.h>
20 #include <linux/kvm.h>
21 #include <linux/kvm_host.h>
22 #include <linux/interrupt.h>
23 #include <linux/irq.h>
24 #include <linux/uaccess.h>
26 #include <clocksource/arm_arch_timer.h>
27 #include <asm/arch_timer.h>
28 #include <asm/kvm_hyp.h>
30 #include <kvm/arm_vgic.h>
31 #include <kvm/arm_arch_timer.h>
35 static struct timecounter
*timecounter
;
36 static unsigned int host_vtimer_irq
;
37 static u32 host_vtimer_irq_flags
;
39 static const struct kvm_irq_level default_ptimer_irq
= {
44 static const struct kvm_irq_level default_vtimer_irq
= {
49 static bool kvm_timer_irq_can_fire(struct arch_timer_context
*timer_ctx
);
50 static void kvm_timer_update_irq(struct kvm_vcpu
*vcpu
, bool new_level
,
51 struct arch_timer_context
*timer_ctx
);
52 static bool kvm_timer_should_fire(struct arch_timer_context
*timer_ctx
);
54 u64
kvm_phys_timer_read(void)
56 return timecounter
->cc
->read(timecounter
->cc
);
59 static void soft_timer_start(struct hrtimer
*hrt
, u64 ns
)
61 hrtimer_start(hrt
, ktime_add_ns(ktime_get(), ns
),
/*
 * Cancel a software timer and, when a companion work item exists,
 * wait for any queued expiry work to finish as well.
 */
static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
{
	hrtimer_cancel(hrt);
	if (work)
		cancel_work_sync(work);
}
72 static void kvm_vtimer_update_mask_user(struct kvm_vcpu
*vcpu
)
74 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
77 * When using a userspace irqchip with the architected timers, we must
78 * prevent continuously exiting from the guest, and therefore mask the
79 * physical interrupt by disabling it on the host interrupt controller
80 * when the virtual level is high, such that the guest can make
81 * forward progress. Once we detect the output level being
82 * de-asserted, we unmask the interrupt again so that we exit from the
83 * guest when the timer fires.
85 if (vtimer
->irq
.level
)
86 disable_percpu_irq(host_vtimer_irq
);
88 enable_percpu_irq(host_vtimer_irq
, 0);
91 static irqreturn_t
kvm_arch_timer_handler(int irq
, void *dev_id
)
93 struct kvm_vcpu
*vcpu
= *(struct kvm_vcpu
**)dev_id
;
94 struct arch_timer_context
*vtimer
;
98 * We may see a timer interrupt after vcpu_put() has been called which
99 * sets the CPU's vcpu pointer to NULL, because even though the timer
100 * has been disabled in vtimer_save_state(), the hardware interrupt
101 * signal may not have been retired from the interrupt controller yet.
106 vtimer
= vcpu_vtimer(vcpu
);
107 if (!vtimer
->irq
.level
) {
108 cnt_ctl
= read_sysreg_el0(cntv_ctl
);
109 cnt_ctl
&= ARCH_TIMER_CTRL_ENABLE
| ARCH_TIMER_CTRL_IT_STAT
|
110 ARCH_TIMER_CTRL_IT_MASK
;
111 if (cnt_ctl
== (ARCH_TIMER_CTRL_ENABLE
| ARCH_TIMER_CTRL_IT_STAT
))
112 kvm_timer_update_irq(vcpu
, true, vtimer
);
115 if (unlikely(!irqchip_in_kernel(vcpu
->kvm
)))
116 kvm_vtimer_update_mask_user(vcpu
);
122 * Work function for handling the backup timer that we schedule when a vcpu is
123 * no longer running, but had a timer programmed to fire in the future.
125 static void kvm_timer_inject_irq_work(struct work_struct
*work
)
127 struct kvm_vcpu
*vcpu
;
129 vcpu
= container_of(work
, struct kvm_vcpu
, arch
.timer_cpu
.expired
);
132 * If the vcpu is blocked we want to wake it up so that it will see
133 * the timer has expired when entering the guest.
135 kvm_vcpu_wake_up(vcpu
);
138 static u64
kvm_timer_compute_delta(struct arch_timer_context
*timer_ctx
)
142 cval
= timer_ctx
->cnt_cval
;
143 now
= kvm_phys_timer_read() - timer_ctx
->cntvoff
;
148 ns
= cyclecounter_cyc2ns(timecounter
->cc
,
158 static bool kvm_timer_irq_can_fire(struct arch_timer_context
*timer_ctx
)
160 return !(timer_ctx
->cnt_ctl
& ARCH_TIMER_CTRL_IT_MASK
) &&
161 (timer_ctx
->cnt_ctl
& ARCH_TIMER_CTRL_ENABLE
);
165 * Returns the earliest expiration time in ns among guest timers.
166 * Note that it will return 0 if none of timers can fire.
168 static u64
kvm_timer_earliest_exp(struct kvm_vcpu
*vcpu
)
170 u64 min_virt
= ULLONG_MAX
, min_phys
= ULLONG_MAX
;
171 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
172 struct arch_timer_context
*ptimer
= vcpu_ptimer(vcpu
);
174 if (kvm_timer_irq_can_fire(vtimer
))
175 min_virt
= kvm_timer_compute_delta(vtimer
);
177 if (kvm_timer_irq_can_fire(ptimer
))
178 min_phys
= kvm_timer_compute_delta(ptimer
);
180 /* If none of timers can fire, then return 0 */
181 if ((min_virt
== ULLONG_MAX
) && (min_phys
== ULLONG_MAX
))
184 return min(min_virt
, min_phys
);
187 static enum hrtimer_restart
kvm_bg_timer_expire(struct hrtimer
*hrt
)
189 struct arch_timer_cpu
*timer
;
190 struct kvm_vcpu
*vcpu
;
193 timer
= container_of(hrt
, struct arch_timer_cpu
, bg_timer
);
194 vcpu
= container_of(timer
, struct kvm_vcpu
, arch
.timer_cpu
);
197 * Check that the timer has really expired from the guest's
198 * PoV (NTP on the host may have forced it to expire
199 * early). If we should have slept longer, restart it.
201 ns
= kvm_timer_earliest_exp(vcpu
);
203 hrtimer_forward_now(hrt
, ns_to_ktime(ns
));
204 return HRTIMER_RESTART
;
207 schedule_work(&timer
->expired
);
208 return HRTIMER_NORESTART
;
211 static enum hrtimer_restart
kvm_phys_timer_expire(struct hrtimer
*hrt
)
213 struct arch_timer_context
*ptimer
;
214 struct arch_timer_cpu
*timer
;
215 struct kvm_vcpu
*vcpu
;
218 timer
= container_of(hrt
, struct arch_timer_cpu
, phys_timer
);
219 vcpu
= container_of(timer
, struct kvm_vcpu
, arch
.timer_cpu
);
220 ptimer
= vcpu_ptimer(vcpu
);
223 * Check that the timer has really expired from the guest's
224 * PoV (NTP on the host may have forced it to expire
225 * early). If not ready, schedule for a later time.
227 ns
= kvm_timer_compute_delta(ptimer
);
229 hrtimer_forward_now(hrt
, ns_to_ktime(ns
));
230 return HRTIMER_RESTART
;
233 kvm_timer_update_irq(vcpu
, true, ptimer
);
234 return HRTIMER_NORESTART
;
237 static bool kvm_timer_should_fire(struct arch_timer_context
*timer_ctx
)
241 if (!kvm_timer_irq_can_fire(timer_ctx
))
244 cval
= timer_ctx
->cnt_cval
;
245 now
= kvm_phys_timer_read() - timer_ctx
->cntvoff
;
250 bool kvm_timer_is_pending(struct kvm_vcpu
*vcpu
)
252 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
253 struct arch_timer_context
*ptimer
= vcpu_ptimer(vcpu
);
255 if (vtimer
->irq
.level
|| ptimer
->irq
.level
)
259 * When this is called from withing the wait loop of kvm_vcpu_block(),
260 * the software view of the timer state is up to date (timer->loaded
261 * is false), and so we can simply check if the timer should fire now.
263 if (!vtimer
->loaded
&& kvm_timer_should_fire(vtimer
))
266 return kvm_timer_should_fire(ptimer
);
270 * Reflect the timer output level into the kvm_run structure
272 void kvm_timer_update_run(struct kvm_vcpu
*vcpu
)
274 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
275 struct arch_timer_context
*ptimer
= vcpu_ptimer(vcpu
);
276 struct kvm_sync_regs
*regs
= &vcpu
->run
->s
.regs
;
278 /* Populate the device bitmap with the timer states */
279 regs
->device_irq_level
&= ~(KVM_ARM_DEV_EL1_VTIMER
|
280 KVM_ARM_DEV_EL1_PTIMER
);
281 if (vtimer
->irq
.level
)
282 regs
->device_irq_level
|= KVM_ARM_DEV_EL1_VTIMER
;
283 if (ptimer
->irq
.level
)
284 regs
->device_irq_level
|= KVM_ARM_DEV_EL1_PTIMER
;
287 static void kvm_timer_update_irq(struct kvm_vcpu
*vcpu
, bool new_level
,
288 struct arch_timer_context
*timer_ctx
)
292 timer_ctx
->irq
.level
= new_level
;
293 trace_kvm_timer_update_irq(vcpu
->vcpu_id
, timer_ctx
->irq
.irq
,
294 timer_ctx
->irq
.level
);
296 if (likely(irqchip_in_kernel(vcpu
->kvm
))) {
297 ret
= kvm_vgic_inject_irq(vcpu
->kvm
, vcpu
->vcpu_id
,
299 timer_ctx
->irq
.level
,
305 /* Schedule the background timer for the emulated timer. */
306 static void phys_timer_emulate(struct kvm_vcpu
*vcpu
)
308 struct arch_timer_cpu
*timer
= &vcpu
->arch
.timer_cpu
;
309 struct arch_timer_context
*ptimer
= vcpu_ptimer(vcpu
);
312 * If the timer can fire now we have just raised the IRQ line and we
313 * don't need to have a soft timer scheduled for the future. If the
314 * timer cannot fire at all, then we also don't need a soft timer.
316 if (kvm_timer_should_fire(ptimer
) || !kvm_timer_irq_can_fire(ptimer
)) {
317 soft_timer_cancel(&timer
->phys_timer
, NULL
);
321 soft_timer_start(&timer
->phys_timer
, kvm_timer_compute_delta(ptimer
));
325 * Check if there was a change in the timer state, so that we should either
326 * raise or lower the line level to the GIC or schedule a background timer to
327 * emulate the physical timer.
329 static void kvm_timer_update_state(struct kvm_vcpu
*vcpu
)
331 struct arch_timer_cpu
*timer
= &vcpu
->arch
.timer_cpu
;
332 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
333 struct arch_timer_context
*ptimer
= vcpu_ptimer(vcpu
);
335 if (unlikely(!timer
->enabled
))
338 if (kvm_timer_should_fire(vtimer
) != vtimer
->irq
.level
)
339 kvm_timer_update_irq(vcpu
, !vtimer
->irq
.level
, vtimer
);
341 if (kvm_timer_should_fire(ptimer
) != ptimer
->irq
.level
)
342 kvm_timer_update_irq(vcpu
, !ptimer
->irq
.level
, ptimer
);
344 phys_timer_emulate(vcpu
);
347 static void vtimer_save_state(struct kvm_vcpu
*vcpu
)
349 struct arch_timer_cpu
*timer
= &vcpu
->arch
.timer_cpu
;
350 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
353 local_irq_save(flags
);
358 if (timer
->enabled
) {
359 vtimer
->cnt_ctl
= read_sysreg_el0(cntv_ctl
);
360 vtimer
->cnt_cval
= read_sysreg_el0(cntv_cval
);
363 /* Disable the virtual timer */
364 write_sysreg_el0(0, cntv_ctl
);
367 vtimer
->loaded
= false;
369 local_irq_restore(flags
);
373 * Schedule the background timer before calling kvm_vcpu_block, so that this
374 * thread is removed from its waitqueue and made runnable when there's a timer
375 * interrupt to handle.
377 void kvm_timer_schedule(struct kvm_vcpu
*vcpu
)
379 struct arch_timer_cpu
*timer
= &vcpu
->arch
.timer_cpu
;
380 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
381 struct arch_timer_context
*ptimer
= vcpu_ptimer(vcpu
);
383 vtimer_save_state(vcpu
);
386 * No need to schedule a background timer if any guest timer has
387 * already expired, because kvm_vcpu_block will return before putting
388 * the thread to sleep.
390 if (kvm_timer_should_fire(vtimer
) || kvm_timer_should_fire(ptimer
))
394 * If both timers are not capable of raising interrupts (disabled or
395 * masked), then there's no more work for us to do.
397 if (!kvm_timer_irq_can_fire(vtimer
) && !kvm_timer_irq_can_fire(ptimer
))
401 * The guest timers have not yet expired, schedule a background timer.
402 * Set the earliest expiration time among the guest timers.
404 soft_timer_start(&timer
->bg_timer
, kvm_timer_earliest_exp(vcpu
));
407 static void vtimer_restore_state(struct kvm_vcpu
*vcpu
)
409 struct arch_timer_cpu
*timer
= &vcpu
->arch
.timer_cpu
;
410 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
413 local_irq_save(flags
);
418 if (timer
->enabled
) {
419 write_sysreg_el0(vtimer
->cnt_cval
, cntv_cval
);
421 write_sysreg_el0(vtimer
->cnt_ctl
, cntv_ctl
);
424 vtimer
->loaded
= true;
426 local_irq_restore(flags
);
429 void kvm_timer_unschedule(struct kvm_vcpu
*vcpu
)
431 struct arch_timer_cpu
*timer
= &vcpu
->arch
.timer_cpu
;
433 vtimer_restore_state(vcpu
);
435 soft_timer_cancel(&timer
->bg_timer
, &timer
->expired
);
438 static void set_cntvoff(u64 cntvoff
)
440 u32 low
= lower_32_bits(cntvoff
);
441 u32 high
= upper_32_bits(cntvoff
);
444 * Since kvm_call_hyp doesn't fully support the ARM PCS especially on
445 * 32-bit systems, but rather passes register by register shifted one
446 * place (we put the function address in r0/x0), we cannot simply pass
447 * a 64-bit value as an argument, but have to split the value in two
450 kvm_call_hyp(__kvm_timer_set_cntvoff
, low
, high
);
453 static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu
*vcpu
)
455 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
459 phys_active
= vtimer
->irq
.level
||
460 kvm_vgic_map_is_active(vcpu
, vtimer
->irq
.irq
);
462 ret
= irq_set_irqchip_state(host_vtimer_irq
,
463 IRQCHIP_STATE_ACTIVE
,
/* Userspace-irqchip variant of vcpu_load: just refresh the IRQ mask. */
static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu)
{
	kvm_vtimer_update_mask_user(vcpu);
}
473 void kvm_timer_vcpu_load(struct kvm_vcpu
*vcpu
)
475 struct arch_timer_cpu
*timer
= &vcpu
->arch
.timer_cpu
;
476 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
478 if (unlikely(!timer
->enabled
))
481 if (unlikely(!irqchip_in_kernel(vcpu
->kvm
)))
482 kvm_timer_vcpu_load_user(vcpu
);
484 kvm_timer_vcpu_load_vgic(vcpu
);
486 set_cntvoff(vtimer
->cntvoff
);
488 vtimer_restore_state(vcpu
);
490 /* Set the background timer for the physical timer emulation. */
491 phys_timer_emulate(vcpu
);
494 bool kvm_timer_should_notify_user(struct kvm_vcpu
*vcpu
)
496 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
497 struct arch_timer_context
*ptimer
= vcpu_ptimer(vcpu
);
498 struct kvm_sync_regs
*sregs
= &vcpu
->run
->s
.regs
;
501 if (likely(irqchip_in_kernel(vcpu
->kvm
)))
504 vlevel
= sregs
->device_irq_level
& KVM_ARM_DEV_EL1_VTIMER
;
505 plevel
= sregs
->device_irq_level
& KVM_ARM_DEV_EL1_PTIMER
;
507 return vtimer
->irq
.level
!= vlevel
||
508 ptimer
->irq
.level
!= plevel
;
511 void kvm_timer_vcpu_put(struct kvm_vcpu
*vcpu
)
513 struct arch_timer_cpu
*timer
= &vcpu
->arch
.timer_cpu
;
515 if (unlikely(!timer
->enabled
))
518 vtimer_save_state(vcpu
);
521 * Cancel the physical timer emulation, because the only case where we
522 * need it after a vcpu_put is in the context of a sleeping VCPU, and
523 * in that case we already factor in the deadline for the physical
524 * timer when scheduling the bg_timer.
526 * In any case, we re-schedule the hrtimer for the physical timer when
527 * coming back to the VCPU thread in kvm_timer_vcpu_load().
529 soft_timer_cancel(&timer
->phys_timer
, NULL
);
532 * The kernel may decide to run userspace after calling vcpu_put, so
533 * we reset cntvoff to 0 to ensure a consistent read between user
534 * accesses to the virtual counter and kernel access to the physical
540 static void unmask_vtimer_irq(struct kvm_vcpu
*vcpu
)
542 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
544 if (unlikely(!irqchip_in_kernel(vcpu
->kvm
))) {
545 kvm_vtimer_update_mask_user(vcpu
);
550 * If the guest disabled the timer without acking the interrupt, then
551 * we must make sure the physical and virtual active states are in
552 * sync by deactivating the physical interrupt, because otherwise we
553 * wouldn't see the next timer interrupt in the host.
555 if (!kvm_vgic_map_is_active(vcpu
, vtimer
->irq
.irq
)) {
557 ret
= irq_set_irqchip_state(host_vtimer_irq
,
558 IRQCHIP_STATE_ACTIVE
,
565 * kvm_timer_sync_hwstate - sync timer state from cpu
566 * @vcpu: The vcpu pointer
568 * Check if any of the timers have expired while we were running in the guest,
569 * and inject an interrupt if that was the case.
571 void kvm_timer_sync_hwstate(struct kvm_vcpu
*vcpu
)
573 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
576 * If we entered the guest with the vtimer output asserted we have to
577 * check if the guest has modified the timer so that we should lower
578 * the line at this point.
580 if (vtimer
->irq
.level
) {
581 vtimer
->cnt_ctl
= read_sysreg_el0(cntv_ctl
);
582 vtimer
->cnt_cval
= read_sysreg_el0(cntv_cval
);
583 if (!kvm_timer_should_fire(vtimer
)) {
584 kvm_timer_update_irq(vcpu
, false, vtimer
);
585 unmask_vtimer_irq(vcpu
);
590 int kvm_timer_vcpu_reset(struct kvm_vcpu
*vcpu
)
592 struct arch_timer_cpu
*timer
= &vcpu
->arch
.timer_cpu
;
593 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
594 struct arch_timer_context
*ptimer
= vcpu_ptimer(vcpu
);
597 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
598 * and to 0 for ARMv7. We provide an implementation that always
599 * resets the timer to be disabled and unmasked and is compliant with
600 * the ARMv7 architecture.
604 kvm_timer_update_state(vcpu
);
606 if (timer
->enabled
&& irqchip_in_kernel(vcpu
->kvm
))
607 kvm_vgic_reset_mapped_irq(vcpu
, vtimer
->irq
.irq
);
612 /* Make the updates of cntvoff for all vtimer contexts atomic */
613 static void update_vtimer_cntvoff(struct kvm_vcpu
*vcpu
, u64 cntvoff
)
616 struct kvm
*kvm
= vcpu
->kvm
;
617 struct kvm_vcpu
*tmp
;
619 mutex_lock(&kvm
->lock
);
620 kvm_for_each_vcpu(i
, tmp
, kvm
)
621 vcpu_vtimer(tmp
)->cntvoff
= cntvoff
;
624 * When called from the vcpu create path, the CPU being created is not
625 * included in the loop above, so we just set it here as well.
627 vcpu_vtimer(vcpu
)->cntvoff
= cntvoff
;
628 mutex_unlock(&kvm
->lock
);
631 void kvm_timer_vcpu_init(struct kvm_vcpu
*vcpu
)
633 struct arch_timer_cpu
*timer
= &vcpu
->arch
.timer_cpu
;
634 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
635 struct arch_timer_context
*ptimer
= vcpu_ptimer(vcpu
);
637 /* Synchronize cntvoff across all vtimers of a VM. */
638 update_vtimer_cntvoff(vcpu
, kvm_phys_timer_read());
639 vcpu_ptimer(vcpu
)->cntvoff
= 0;
641 INIT_WORK(&timer
->expired
, kvm_timer_inject_irq_work
);
642 hrtimer_init(&timer
->bg_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_ABS
);
643 timer
->bg_timer
.function
= kvm_bg_timer_expire
;
645 hrtimer_init(&timer
->phys_timer
, CLOCK_MONOTONIC
, HRTIMER_MODE_ABS
);
646 timer
->phys_timer
.function
= kvm_phys_timer_expire
;
648 vtimer
->irq
.irq
= default_vtimer_irq
.irq
;
649 ptimer
->irq
.irq
= default_ptimer_irq
.irq
;
652 static void kvm_timer_init_interrupt(void *info
)
654 enable_percpu_irq(host_vtimer_irq
, host_vtimer_irq_flags
);
657 int kvm_arm_timer_set_reg(struct kvm_vcpu
*vcpu
, u64 regid
, u64 value
)
659 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
660 struct arch_timer_context
*ptimer
= vcpu_ptimer(vcpu
);
663 case KVM_REG_ARM_TIMER_CTL
:
664 vtimer
->cnt_ctl
= value
& ~ARCH_TIMER_CTRL_IT_STAT
;
666 case KVM_REG_ARM_TIMER_CNT
:
667 update_vtimer_cntvoff(vcpu
, kvm_phys_timer_read() - value
);
669 case KVM_REG_ARM_TIMER_CVAL
:
670 vtimer
->cnt_cval
= value
;
672 case KVM_REG_ARM_PTIMER_CTL
:
673 ptimer
->cnt_ctl
= value
& ~ARCH_TIMER_CTRL_IT_STAT
;
675 case KVM_REG_ARM_PTIMER_CVAL
:
676 ptimer
->cnt_cval
= value
;
683 kvm_timer_update_state(vcpu
);
687 static u64
read_timer_ctl(struct arch_timer_context
*timer
)
690 * Set ISTATUS bit if it's expired.
691 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
692 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
693 * regardless of ENABLE bit for our implementation convenience.
695 if (!kvm_timer_compute_delta(timer
))
696 return timer
->cnt_ctl
| ARCH_TIMER_CTRL_IT_STAT
;
698 return timer
->cnt_ctl
;
701 u64
kvm_arm_timer_get_reg(struct kvm_vcpu
*vcpu
, u64 regid
)
703 struct arch_timer_context
*ptimer
= vcpu_ptimer(vcpu
);
704 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
707 case KVM_REG_ARM_TIMER_CTL
:
708 return read_timer_ctl(vtimer
);
709 case KVM_REG_ARM_TIMER_CNT
:
710 return kvm_phys_timer_read() - vtimer
->cntvoff
;
711 case KVM_REG_ARM_TIMER_CVAL
:
712 return vtimer
->cnt_cval
;
713 case KVM_REG_ARM_PTIMER_CTL
:
714 return read_timer_ctl(ptimer
);
715 case KVM_REG_ARM_PTIMER_CVAL
:
716 return ptimer
->cnt_cval
;
717 case KVM_REG_ARM_PTIMER_CNT
:
718 return kvm_phys_timer_read();
723 static int kvm_timer_starting_cpu(unsigned int cpu
)
725 kvm_timer_init_interrupt(NULL
);
729 static int kvm_timer_dying_cpu(unsigned int cpu
)
731 disable_percpu_irq(host_vtimer_irq
);
735 int kvm_timer_hyp_init(bool has_gic
)
737 struct arch_timer_kvm_info
*info
;
740 info
= arch_timer_get_kvm_info();
741 timecounter
= &info
->timecounter
;
743 if (!timecounter
->cc
) {
744 kvm_err("kvm_arch_timer: uninitialized timecounter\n");
748 if (info
->virtual_irq
<= 0) {
749 kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
753 host_vtimer_irq
= info
->virtual_irq
;
755 host_vtimer_irq_flags
= irq_get_trigger_type(host_vtimer_irq
);
756 if (host_vtimer_irq_flags
!= IRQF_TRIGGER_HIGH
&&
757 host_vtimer_irq_flags
!= IRQF_TRIGGER_LOW
) {
758 kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
760 host_vtimer_irq_flags
= IRQF_TRIGGER_LOW
;
763 err
= request_percpu_irq(host_vtimer_irq
, kvm_arch_timer_handler
,
764 "kvm guest timer", kvm_get_running_vcpus());
766 kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
767 host_vtimer_irq
, err
);
772 err
= irq_set_vcpu_affinity(host_vtimer_irq
,
773 kvm_get_running_vcpus());
775 kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
780 kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq
);
782 cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING
,
783 "kvm/arm/timer:starting", kvm_timer_starting_cpu
,
784 kvm_timer_dying_cpu
);
787 free_percpu_irq(host_vtimer_irq
, kvm_get_running_vcpus());
791 void kvm_timer_vcpu_terminate(struct kvm_vcpu
*vcpu
)
793 struct arch_timer_cpu
*timer
= &vcpu
->arch
.timer_cpu
;
794 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
796 soft_timer_cancel(&timer
->bg_timer
, &timer
->expired
);
797 soft_timer_cancel(&timer
->phys_timer
, NULL
);
798 kvm_vgic_unmap_phys_irq(vcpu
, vtimer
->irq
.irq
);
801 static bool timer_irqs_are_valid(struct kvm_vcpu
*vcpu
)
803 int vtimer_irq
, ptimer_irq
;
806 vtimer_irq
= vcpu_vtimer(vcpu
)->irq
.irq
;
807 ret
= kvm_vgic_set_owner(vcpu
, vtimer_irq
, vcpu_vtimer(vcpu
));
811 ptimer_irq
= vcpu_ptimer(vcpu
)->irq
.irq
;
812 ret
= kvm_vgic_set_owner(vcpu
, ptimer_irq
, vcpu_ptimer(vcpu
));
816 kvm_for_each_vcpu(i
, vcpu
, vcpu
->kvm
) {
817 if (vcpu_vtimer(vcpu
)->irq
.irq
!= vtimer_irq
||
818 vcpu_ptimer(vcpu
)->irq
.irq
!= ptimer_irq
)
825 int kvm_timer_enable(struct kvm_vcpu
*vcpu
)
827 struct arch_timer_cpu
*timer
= &vcpu
->arch
.timer_cpu
;
828 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
834 /* Without a VGIC we do not map virtual IRQs to physical IRQs */
835 if (!irqchip_in_kernel(vcpu
->kvm
))
838 if (!vgic_initialized(vcpu
->kvm
))
841 if (!timer_irqs_are_valid(vcpu
)) {
842 kvm_debug("incorrectly configured timer irqs\n");
846 ret
= kvm_vgic_map_phys_irq(vcpu
, host_vtimer_irq
, vtimer
->irq
.irq
);
853 kvm_timer_vcpu_load(vcpu
);
860 * On VHE system, we only need to configure trap on physical timer and counter
861 * accesses in EL0 and EL1 once, not for every world switch.
862 * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
863 * and this makes those bits have no effect for the host kernel execution.
865 void kvm_timer_init_vhe(void)
867 /* When HCR_EL2.E2H ==1, EL1PCEN and EL1PCTEN are shifted by 10 */
868 u32 cnthctl_shift
= 10;
872 * Disallow physical timer access for the guest.
873 * Physical counter access is allowed.
875 val
= read_sysreg(cnthctl_el2
);
876 val
&= ~(CNTHCTL_EL1PCEN
<< cnthctl_shift
);
877 val
|= (CNTHCTL_EL1PCTEN
<< cnthctl_shift
);
878 write_sysreg(val
, cnthctl_el2
);
881 static void set_timer_irqs(struct kvm
*kvm
, int vtimer_irq
, int ptimer_irq
)
883 struct kvm_vcpu
*vcpu
;
886 kvm_for_each_vcpu(i
, vcpu
, kvm
) {
887 vcpu_vtimer(vcpu
)->irq
.irq
= vtimer_irq
;
888 vcpu_ptimer(vcpu
)->irq
.irq
= ptimer_irq
;
892 int kvm_arm_timer_set_attr(struct kvm_vcpu
*vcpu
, struct kvm_device_attr
*attr
)
894 int __user
*uaddr
= (int __user
*)(long)attr
->addr
;
895 struct arch_timer_context
*vtimer
= vcpu_vtimer(vcpu
);
896 struct arch_timer_context
*ptimer
= vcpu_ptimer(vcpu
);
899 if (!irqchip_in_kernel(vcpu
->kvm
))
902 if (get_user(irq
, uaddr
))
905 if (!(irq_is_ppi(irq
)))
908 if (vcpu
->arch
.timer_cpu
.enabled
)
911 switch (attr
->attr
) {
912 case KVM_ARM_VCPU_TIMER_IRQ_VTIMER
:
913 set_timer_irqs(vcpu
->kvm
, irq
, ptimer
->irq
.irq
);
915 case KVM_ARM_VCPU_TIMER_IRQ_PTIMER
:
916 set_timer_irqs(vcpu
->kvm
, vtimer
->irq
.irq
, irq
);
925 int kvm_arm_timer_get_attr(struct kvm_vcpu
*vcpu
, struct kvm_device_attr
*attr
)
927 int __user
*uaddr
= (int __user
*)(long)attr
->addr
;
928 struct arch_timer_context
*timer
;
931 switch (attr
->attr
) {
932 case KVM_ARM_VCPU_TIMER_IRQ_VTIMER
:
933 timer
= vcpu_vtimer(vcpu
);
935 case KVM_ARM_VCPU_TIMER_IRQ_PTIMER
:
936 timer
= vcpu_ptimer(vcpu
);
942 irq
= timer
->irq
.irq
;
943 return put_user(irq
, uaddr
);
946 int kvm_arm_timer_has_attr(struct kvm_vcpu
*vcpu
, struct kvm_device_attr
*attr
)
948 switch (attr
->attr
) {
949 case KVM_ARM_VCPU_TIMER_IRQ_VTIMER
:
950 case KVM_ARM_VCPU_TIMER_IRQ_PTIMER
: