]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blame - virt/kvm/arm/arch_timer.c
KVM: arm/arm64: Move timer IRQ default init to arch_timer.c
[mirror_ubuntu-eoan-kernel.git] / virt / kvm / arm / arch_timer.c
CommitLineData
53e72406
MZ
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/cpu.h>
53e72406
MZ
20#include <linux/kvm.h>
21#include <linux/kvm_host.h>
22#include <linux/interrupt.h>
b452cb52 23#include <linux/irq.h>
53e72406 24
372b7c1b 25#include <clocksource/arm_arch_timer.h>
53e72406 26#include <asm/arch_timer.h>
488f94d7 27#include <asm/kvm_hyp.h>
53e72406 28
7275acdf
MZ
29#include <kvm/arm_vgic.h>
30#include <kvm/arm_arch_timer.h>
53e72406 31
e21f0910
CD
32#include "trace.h"
33
/* Host-wide timecounter used for all guest-counter reads. */
static struct timecounter *timecounter;
/* Host interrupt line backing the guest's virtual timer (probed at init). */
static unsigned int host_vtimer_irq;
/* Trigger type (IRQF_TRIGGER_*) of host_vtimer_irq, read at init time. */
static u32 host_vtimer_irq_flags;

/* Default interrupt number for the EL1 physical timer (PPI). */
static const struct kvm_irq_level default_ptimer_irq = {
	.irq	= 30,
	.level	= 1,
};

/* Default interrupt number for the EL1 virtual timer (PPI). */
static const struct kvm_irq_level default_vtimer_irq = {
	.irq	= 27,
	.level	= 1,
};
/*
 * Called when the vcpu is taken off the physical CPU.  The cached
 * "active clear" state of the vtimer interrupt can no longer be trusted
 * once we lose the CPU, so invalidate it (see the caching discussion in
 * kvm_timer_flush_hwstate_vgic()).
 */
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu_vtimer(vcpu)->active_cleared_last = false;
}
52
/* Read the host physical counter through the shared timecounter. */
u64 kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}
57
/* Is the per-vcpu background hrtimer currently armed? */
static bool timer_is_armed(struct arch_timer_cpu *timer)
{
	return timer->armed;
}
62
/* timer_arm: as in "arm the timer", not as in ARM the company */
/* Start the background hrtimer to fire @ns nanoseconds from now. */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
	timer->armed = true;
	hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS);
}
70
71static void timer_disarm(struct arch_timer_cpu *timer)
72{
73 if (timer_is_armed(timer)) {
74 hrtimer_cancel(&timer->timer);
75 cancel_work_sync(&timer->expired);
76 timer->armed = false;
77 }
78}
79
/*
 * Host-side handler for the percpu vtimer interrupt.  This should never
 * fire in practice; see the comment below.
 */
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

	/*
	 * We disable the timer in the world switch and let it be
	 * handled by kvm_timer_sync_hwstate(). Getting a timer
	 * interrupt at this point is a sure sign of some major
	 * breakage.
	 */
	pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
	return IRQ_HANDLED;
}
93
1a748478
CD
94/*
95 * Work function for handling the backup timer that we schedule when a vcpu is
96 * no longer running, but had a timer programmed to fire in the future.
97 */
53e72406
MZ
98static void kvm_timer_inject_irq_work(struct work_struct *work)
99{
100 struct kvm_vcpu *vcpu;
101
102 vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
1c5631c7 103
1a748478
CD
104 /*
105 * If the vcpu is blocked we want to wake it up so that it will see
106 * the timer has expired when entering the guest.
107 */
1b6502e5 108 kvm_vcpu_wake_up(vcpu);
53e72406
MZ
109}
110
9171fa2e 111static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
1c5631c7 112{
a5a1d1c2 113 u64 cval, now;
1c5631c7 114
9171fa2e
JL
115 cval = timer_ctx->cnt_cval;
116 now = kvm_phys_timer_read() - timer_ctx->cntvoff;
1c5631c7
MZ
117
118 if (now < cval) {
119 u64 ns;
120
121 ns = cyclecounter_cyc2ns(timecounter->cc,
122 cval - now,
123 timecounter->mask,
124 &timecounter->frac);
125 return ns;
126 }
127
128 return 0;
129}
130
fb280e97
JL
131static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
132{
133 return !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
134 (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
135}
136
137/*
138 * Returns the earliest expiration time in ns among guest timers.
139 * Note that it will return 0 if none of timers can fire.
140 */
141static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
142{
143 u64 min_virt = ULLONG_MAX, min_phys = ULLONG_MAX;
144 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
145 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
146
147 if (kvm_timer_irq_can_fire(vtimer))
148 min_virt = kvm_timer_compute_delta(vtimer);
149
150 if (kvm_timer_irq_can_fire(ptimer))
151 min_phys = kvm_timer_compute_delta(ptimer);
152
153 /* If none of timers can fire, then return 0 */
154 if ((min_virt == ULLONG_MAX) && (min_phys == ULLONG_MAX))
155 return 0;
156
157 return min(min_virt, min_phys);
158}
159
/*
 * hrtimer callback for the per-vcpu background timer.  Either restarts
 * the timer (if we woke too early from the guest's PoV) or hands off to
 * the expired-work item (kvm_timer_inject_irq_work) to wake the vcpu.
 */
static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;
	u64 ns;

	timer = container_of(hrt, struct arch_timer_cpu, timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);

	/*
	 * Check that the timer has really expired from the guest's
	 * PoV (NTP on the host may have forced it to expire
	 * early). If we should have slept longer, restart it.
	 */
	ns = kvm_timer_earliest_exp(vcpu);
	if (unlikely(ns)) {
		hrtimer_forward_now(hrt, ns_to_ktime(ns));
		return HRTIMER_RESTART;
	}

	schedule_work(&timer->expired);
	return HRTIMER_NORESTART;
}
183
9171fa2e 184bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
1a748478 185{
a5a1d1c2 186 u64 cval, now;
1a748478 187
9171fa2e 188 if (!kvm_timer_irq_can_fire(timer_ctx))
1a748478
CD
189 return false;
190
9171fa2e
JL
191 cval = timer_ctx->cnt_cval;
192 now = kvm_phys_timer_read() - timer_ctx->cntvoff;
1a748478
CD
193
194 return cval <= now;
195}
196
d9e13977
AG
197/*
198 * Reflect the timer output level into the kvm_run structure
199 */
200void kvm_timer_update_run(struct kvm_vcpu *vcpu)
201{
202 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
203 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
204 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
205
d9e13977
AG
206 /* Populate the device bitmap with the timer states */
207 regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
208 KVM_ARM_DEV_EL1_PTIMER);
209 if (vtimer->irq.level)
210 regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
211 if (ptimer->irq.level)
212 regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
213}
214
/*
 * Set the emulated timer interrupt line of @timer_ctx to @new_level and,
 * when an in-kernel irqchip is present, propagate the new level to the
 * vgic.  Also invalidates the "active clear" cache for this context.
 * With a userspace irqchip the level is instead published to userspace
 * via kvm_timer_update_run().
 */
static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
				 struct arch_timer_context *timer_ctx)
{
	int ret;

	timer_ctx->active_cleared_last = false;
	timer_ctx->irq.level = new_level;
	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
				   timer_ctx->irq.level);

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					  timer_ctx->irq.irq,
					  timer_ctx->irq.level);
		WARN_ON(ret);
	}
}
232
/*
 * Check if there was a change in the timer state (should we raise or lower
 * the line level to the GIC).
 */
static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	/*
	 * If userspace modified the timer registers via SET_ONE_REG before
	 * the vgic was initialized, we mustn't set the vtimer->irq.level value
	 * because the guest would never see the interrupt. Instead wait
	 * until we call this function from kvm_timer_flush_hwstate.
	 */
	if (unlikely(!timer->enabled))
		return;

	/* Toggle each line that no longer matches its timer condition. */
	if (kvm_timer_should_fire(vtimer) != vtimer->irq.level)
		kvm_timer_update_irq(vcpu, !vtimer->irq.level, vtimer);

	if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
		kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
}
258
f242adaf
JL
259/* Schedule the background timer for the emulated timer. */
260static void kvm_timer_emulate(struct kvm_vcpu *vcpu,
261 struct arch_timer_context *timer_ctx)
262{
263 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
264
265 if (kvm_timer_should_fire(timer_ctx))
266 return;
267
268 if (!kvm_timer_irq_can_fire(timer_ctx))
269 return;
270
271 /* The timer has not yet expired, schedule a background timer */
272 timer_arm(timer, kvm_timer_compute_delta(timer_ctx));
273}
274
/*
 * Schedule the background timer before calling kvm_vcpu_block, so that this
 * thread is removed from its waitqueue and made runnable when there's a timer
 * interrupt to handle.
 */
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	/* kvm_timer_unschedule() must have disarmed any previous timer. */
	BUG_ON(timer_is_armed(timer));

	/*
	 * No need to schedule a background timer if any guest timer has
	 * already expired, because kvm_vcpu_block will return before putting
	 * the thread to sleep.
	 */
	if (kvm_timer_should_fire(vtimer) || kvm_timer_should_fire(ptimer))
		return;

	/*
	 * If both timers are not capable of raising interrupts (disabled or
	 * masked), then there's no more work for us to do.
	 */
	if (!kvm_timer_irq_can_fire(vtimer) && !kvm_timer_irq_can_fire(ptimer))
		return;

	/*
	 * The guest timers have not yet expired, schedule a background timer.
	 * Set the earliest expiration time among the guest timers.
	 */
	timer_arm(timer, kvm_timer_earliest_exp(vcpu));
}
309
310void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
311{
312 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
313 timer_disarm(timer);
314}
315
/*
 * Program the active state of the host vtimer interrupt on the physical
 * distributor to match the guest-visible state, using a cached
 * "active clear" value to avoid needless (re)distributor MMIO.
 */
static void kvm_timer_flush_hwstate_vgic(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	bool phys_active;
	int ret;

	/*
	 * If we enter the guest with the virtual input level to the VGIC
	 * asserted, then we have already told the VGIC what we need to, and
	 * we don't need to exit from the guest until the guest deactivates
	 * the already injected interrupt, so therefore we should set the
	 * hardware active state to prevent unnecessary exits from the guest.
	 *
	 * Also, if we enter the guest with the virtual timer interrupt active,
	 * then it must be active on the physical distributor, because we set
	 * the HW bit and the guest must be able to deactivate the virtual and
	 * physical interrupt at the same time.
	 *
	 * Conversely, if the virtual input level is deasserted and the virtual
	 * interrupt is not active, then always clear the hardware active state
	 * to ensure that hardware interrupts from the timer triggers a guest
	 * exit.
	 */
	phys_active = vtimer->irq.level ||
		      kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);

	/*
	 * We want to avoid hitting the (re)distributor as much as
	 * possible, as this is a potentially expensive MMIO access
	 * (not to mention locks in the irq layer), and a solution for
	 * this is to cache the "active" state in memory.
	 *
	 * Things to consider: we cannot cache an "active set" state,
	 * because the HW can change this behind our back (it becomes
	 * "clear" in the HW). We must then restrict the caching to
	 * the "clear" state.
	 *
	 * The cache is invalidated on:
	 * - vcpu put, indicating that the HW cannot be trusted to be
	 *   in a sane state on the next vcpu load,
	 * - any change in the interrupt state
	 *
	 * Usage conditions:
	 * - cached value is "active clear"
	 * - value to be programmed is "active clear"
	 */
	if (vtimer->active_cleared_last && !phys_active)
		return;

	ret = irq_set_irqchip_state(host_vtimer_irq,
				    IRQCHIP_STATE_ACTIVE,
				    phys_active);
	WARN_ON(ret);

	/* Remember that we programmed "active clear", for the cache above. */
	vtimer->active_cleared_last = !phys_active;
}
372
d9e13977
AG
373bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
374{
375 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
376 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
377 struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
378 bool vlevel, plevel;
379
380 if (likely(irqchip_in_kernel(vcpu->kvm)))
381 return false;
382
383 vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
384 plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;
385
386 return vtimer->irq.level != vlevel ||
387 ptimer->irq.level != plevel;
388}
389
/* Prepare the host vtimer interrupt when running with a userspace irqchip. */
static void kvm_timer_flush_hwstate_user(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	/*
	 * To prevent continuously exiting from the guest, we mask the
	 * physical interrupt such that the guest can make forward progress.
	 * Once we detect the output level being deasserted, we unmask the
	 * interrupt again so that we exit from the guest when the timer
	 * fires.
	 */
	if (vtimer->irq.level)
		disable_percpu_irq(host_vtimer_irq);
	else
		enable_percpu_irq(host_vtimer_irq, 0);
}
406
/**
 * kvm_timer_flush_hwstate - prepare timers before running the vcpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer has expired while we were running in the host,
 * and inject an interrupt if that was the case, making sure the timer is
 * masked or disabled on the host so that we keep executing.  Also schedule a
 * software timer for the physical timer if it is enabled.
 */
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	if (unlikely(!timer->enabled))
		return;

	kvm_timer_update_state(vcpu);

	/* Set the background timer for the physical timer emulation. */
	kvm_timer_emulate(vcpu, vcpu_ptimer(vcpu));

	/* Pick the flush strategy based on where the irqchip lives. */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
		kvm_timer_flush_hwstate_user(vcpu);
	else
		kvm_timer_flush_hwstate_vgic(vcpu);
}
433
/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if any of the timers have expired while we were running in the guest,
 * and inject an interrupt if that was the case.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	/*
	 * This is to cancel the background timer for the physical timer
	 * emulation if it is set.
	 */
	timer_disarm(timer);

	/*
	 * The guest could have modified the timer registers or the timer
	 * could have expired, update the timer state.
	 */
	kvm_timer_update_state(vcpu);
}
457
/* Reset both timer contexts to a disabled, unmasked state.  Returns 0. */
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7.  We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	vtimer->cnt_ctl = 0;
	ptimer->cnt_ctl = 0;
	kvm_timer_update_state(vcpu);

	return 0;
}
475
/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
	int i;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	/* kvm->lock serializes concurrent cntvoff updates across vcpus. */
	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(i, tmp, kvm)
		vcpu_vtimer(tmp)->cntvoff = cntvoff;

	/*
	 * When called from the vcpu create path, the CPU being created is not
	 * included in the loop above, so we just set it here as well.
	 */
	vcpu_vtimer(vcpu)->cntvoff = cntvoff;
	mutex_unlock(&kvm->lock);
}
494
/* One-time per-vcpu timer setup: offsets, background timer, default IRQs. */
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	/* Synchronize cntvoff across all vtimers of a VM. */
	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
	/* The physical timer is never offset. */
	vcpu_ptimer(vcpu)->cntvoff = 0;

	/* Background timer used while the vcpu is blocked/not running. */
	INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
	hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->timer.function = kvm_timer_expire;

	/* Default PPI numbers; userspace may override them later. */
	vtimer->irq.irq = default_vtimer_irq.irq;
	ptimer->irq.irq = default_ptimer_irq.irq;
}
512
/* Per-cpu: enable the host vtimer PPI with the trigger type probed at init. */
static void kvm_timer_init_interrupt(void *info)
{
	enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
}
517
/*
 * Write one guest timer register on behalf of userspace (SET_ONE_REG).
 * Returns 0 on success, -1 for an unknown @regid.
 */
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		vtimer->cnt_ctl = value;
		break;
	case KVM_REG_ARM_TIMER_CNT:
		/* Setting CNT means adjusting the VM-wide virtual offset. */
		update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		vtimer->cnt_cval = value;
		break;
	default:
		return -1;
	}

	/* The write may have changed the timer output level. */
	kvm_timer_update_state(vcpu);
	return 0;
}
539
/*
 * Read one guest timer register on behalf of userspace (GET_ONE_REG).
 * Returns (u64)-1 for an unknown @regid.
 */
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return vtimer->cnt_ctl;
	case KVM_REG_ARM_TIMER_CNT:
		/* Guest-visible counter = physical counter minus offset. */
		return kvm_phys_timer_read() - vtimer->cntvoff;
	case KVM_REG_ARM_TIMER_CVAL:
		return vtimer->cnt_cval;
	}
	return (u64)-1;
}
53e72406 554
/* cpuhp "starting" callback: enable the vtimer IRQ on this CPU. */
static int kvm_timer_starting_cpu(unsigned int cpu)
{
	kvm_timer_init_interrupt(NULL);
	return 0;
}
560
/* cpuhp "dying" callback: disable the vtimer IRQ on this CPU. */
static int kvm_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(host_vtimer_irq);
	return 0;
}
53e72406 566
/*
 * Global (host-wide) timer init: pick up the timecounter and virtual
 * timer IRQ from the arch timer driver, request the percpu interrupt,
 * and register cpuhp callbacks to enable/disable it per CPU.
 * Returns 0 on success or a negative errno.
 */
int kvm_timer_hyp_init(void)
{
	struct arch_timer_kvm_info *info;
	int err;

	info = arch_timer_get_kvm_info();
	timecounter = &info->timecounter;

	if (!timecounter->cc) {
		kvm_err("kvm_arch_timer: uninitialized timecounter\n");
		return -ENODEV;
	}

	if (info->virtual_irq <= 0) {
		kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
			info->virtual_irq);
		return -ENODEV;
	}
	host_vtimer_irq = info->virtual_irq;

	/* Only level triggers make sense for a timer; fall back to low. */
	host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
	if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
	    host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
		kvm_err("Invalid trigger for IRQ%d, assuming level low\n",
			host_vtimer_irq);
		host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
	}

	err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
				 "kvm guest timer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
			host_vtimer_irq, err);
		return err;
	}

	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);

	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
			  "kvm/arm/timer:starting", kvm_timer_starting_cpu,
			  kvm_timer_dying_cpu);
	return err;
}
610
/* Per-vcpu teardown: cancel the background timer and unmap the phys IRQ. */
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

	timer_disarm(timer);
	kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
}
619
/*
 * Enable the per-vcpu timer, mapping the guest vtimer interrupt to the
 * underlying physical interrupt when an in-kernel vgic is present.
 * Idempotent.  Returns 0 on success or a negative errno.
 */
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
	struct irq_desc *desc;
	struct irq_data *data;
	int phys_irq;
	int ret;

	if (timer->enabled)
		return 0;

	/* Without a VGIC we do not map virtual IRQs to physical IRQs */
	if (!irqchip_in_kernel(vcpu->kvm))
		goto no_vgic;

	if (!vgic_initialized(vcpu->kvm))
		return -ENODEV;

	/*
	 * Find the physical IRQ number corresponding to the host_vtimer_irq
	 */
	desc = irq_to_desc(host_vtimer_irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return -EINVAL;
	}

	/* Walk to the root of the irq domain hierarchy for the hwirq. */
	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	phys_irq = data->hwirq;

	/*
	 * Tell the VGIC that the virtual interrupt is tied to a
	 * physical interrupt. We do that once per VCPU.
	 */
	ret = kvm_vgic_map_phys_irq(vcpu, vtimer->irq.irq, phys_irq);
	if (ret)
		return ret;

no_vgic:
	timer->enabled = 1;
	return 0;
}
53e72406 666
/*
 * On VHE system, we only need to configure trap on physical timer and counter
 * accesses in EL0 and EL1 once, not for every world switch.
 * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
 * and this makes those bits have no effect for the host kernel execution.
 */
void kvm_timer_init_vhe(void)
{
	/* When HCR_EL2.E2H ==1, EL1PCEN and EL1PCTEN are shifted by 10 */
	u32 cnthctl_shift = 10;
	u64 val;

	/*
	 * Disallow physical timer access for the guest.
	 * Physical counter access is allowed.
	 */
	val = read_sysreg(cnthctl_el2);
	val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
	val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
	write_sysreg(val, cnthctl_el2);
}