/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

struct vgic_global kvm_vgic_global_state __ro_after_init = {
	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
 *       vgic_cpu->ap_list_lock
 *         kvm->lpi_list_lock
 *           vgic_irq->irq_lock
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 */
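
/*
 * Illustrative sketch (hypothetical code, not from the original file): given
 * the ordering above, code that holds an irq_lock and then needs an
 * ap_list_lock must back off and re-take the locks top-down, roughly:
 *
 *	spin_unlock(&irq->irq_lock);
 *	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
 *	spin_lock(&irq->irq_lock);
 *	(re-check any state sampled before irq_lock was dropped)
 *
 * vgic_queue_irq_unlock() below follows this pattern.
 */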

/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
			continue;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() later once it's finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);
		goto out_unlock;
	}
	irq = NULL;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE)
		return &vcpu->arch.vgic_cpu.private_irqs[intid];

	/* SPIs */
	if (intid <= VGIC_MAX_SPI)
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}
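
/*
 * Illustrative sketch (hypothetical caller, not from the original file):
 * every successful vgic_get_irq() must be balanced by a vgic_put_irq(), e.g.:
 *
 *	struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, intid);
 *
 *	if (!irq)
 *		return -EINVAL;
 *	spin_lock(&irq->irq_lock);
 *	(inspect or modify per-IRQ state here)
 *	spin_unlock(&irq->irq_lock);
 *	vgic_put_irq(kvm, irq);
 *
 * For LPIs this drops the reference taken in vgic_get_lpi(); for SGIs, PPIs
 * and SPIs vgic_put_irq() is a no-op, but callers pair it unconditionally.
 */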

/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (irq->intid < VGIC_MIN_LPI)
		return;

	spin_lock(&dist->lpi_list_lock);
	if (!kref_put(&irq->refcount, vgic_irq_release)) {
		spin_unlock(&dist->lpi_list_lock);
		return;
	}

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;
	spin_unlock(&dist->lpi_list_lock);

	kfree(irq);
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/*
	 * If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	spin_lock(&irqa->irq_lock);
	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	spin_unlock(&irqb->irq_lock);
	spin_unlock(&irqa->irq_lock);
	return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}
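
/*
 * Illustrative note (hypothetical example, not from the original file): with
 * the comparator above, an ap_list such as
 *
 *	{pending, prio 0xa0}, {active}, {pending, prio 0x20}
 *
 * sorts to
 *
 *	{active}, {pending, prio 0x20}, {pending, prio 0xa0}
 *
 * i.e. active entries first, then enabled+pending entries by ascending
 * priority value (lower GIC priority values are more urgent).
 */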

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
	if (irq->owner != owner)
		return false;

	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		spin_unlock(&irq->irq_lock);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	spin_unlock(&irq->irq_lock);

	/* someone can do stuff here, which we re-check below */

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */

	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

		spin_lock(&irq->irq_lock);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	spin_unlock(&irq->irq_lock);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return true;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:   The VM structure pointer
 * @cpuid: The CPU for PPIs
 * @intid: The INTID to inject a new state to.
 * @level: Edge-triggered:  true:  to trigger the interrupt
 *			    false: to ignore the call
 *	   Level-sensitive  true:  raise the input signal
 *			    false: lower the input signal
 * @owner: The opaque pointer to the owner of the IRQ being raised to verify
 *	   that the caller is allowed to inject this IRQ. Userspace
 *	   injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	spin_lock(&irq->irq_lock);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to see here, move along... */
		spin_unlock(&irq->irq_lock);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	vgic_queue_irq_unlock(kvm, irq);
	vgic_put_irq(kvm, irq);

	return 0;
}
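
/*
 * Illustrative sketch (hypothetical caller, not from the original file): an
 * in-kernel device model that owns SPI 32 ("my_dev" is an assumed owner
 * cookie previously registered with kvm_vgic_set_owner()) could signal a
 * level-sensitive interrupt like this:
 *
 *	ret = kvm_vgic_inject_irq(kvm, 0, 32, true, my_dev);	(raise)
 *	...
 *	ret = kvm_vgic_inject_irq(kvm, 0, 32, false, my_dev);	(lower)
 *
 * Userspace-originated injections (e.g. via KVM_IRQ_LINE) pass owner == NULL.
 */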

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);

	BUG_ON(!irq);

	spin_lock(&irq->irq_lock);

	irq->hw = true;
	irq->hwintid = phys_irq;

	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
	struct vgic_irq *irq;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
	BUG_ON(!irq);

	spin_lock(&irq->irq_lock);

	irq->hw = false;
	irq->hwintid = 0;

	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:  Pointer to the VCPU (used for PPIs)
 * @intid: The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner: Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
	struct vgic_irq *irq;
	int ret = 0;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	/* SGIs and LPIs cannot be wired up to any device */
	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
		return -EINVAL;

	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
	spin_lock(&irq->irq_lock);
	if (irq->owner && irq->owner != owner)
		ret = -EEXIST;
	else
		irq->owner = owner;
	spin_unlock(&irq->irq_lock);

	return ret;
}
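
/*
 * Illustrative sketch (hypothetical in-kernel user, not from the original
 * file): a per-VCPU device emulation claims a PPI once at init time and then
 * injects with the same owner cookie, roughly:
 *
 *	ret = kvm_vgic_set_owner(vcpu, my_ppi_intid, my_dev);
 *	if (ret)
 *		return ret;	(e.g. -EEXIST if already claimed)
 *	...
 *	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, my_ppi_intid, true, my_dev);
 *
 * "my_ppi_intid" and "my_dev" are assumed names; in-kernel users such as the
 * arch timer claim their PPIs this way.
 */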

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

retry:
	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;

		spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As we
			 * remove the irq from the list, we also drop
			 * the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				 SINGLE_DEPTH_NESTING);
		spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
		}

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		goto retry;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		if (vgic_irq_is_sgi(irq->intid) && irq->source)
			count += hweight8(irq->source);
		else
			count++;
		spin_unlock(&irq->irq_lock);
	}
	return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
		vgic_sort_ap_list(vcpu);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);

		if (unlikely(vgic_target_oracle(irq) != vcpu))
			goto next;

		/*
		 * If we get an SGI with multiple sources, try to get
		 * them in all at once.
		 */
		do {
			vgic_populate_lr(vcpu, irq, count++);
		} while (irq->source && count < kvm_vgic_global_state.nr_lr);

next:
		spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
					  &vgic_cpu->ap_list_head))
				vgic_set_underflow(vcpu);
			break;
		}
	}

	vcpu->arch.vgic_cpu.used_lrs = count;

	/* Nuke remaining LRs */
	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
		vgic_clear_lr(vcpu, count);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	if (vgic_cpu->used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	/*
	 * If there are no virtual interrupts active or pending for this
	 * VCPU, then there is no work to do and we can bail out without
	 * taking any lock. There is a potential race with someone injecting
	 * interrupts to the VCPU, but it is a benign race as the VCPU will
	 * either observe the new interrupt before or after doing this check,
	 * and introducing an additional synchronization mechanism doesn't
	 * change this.
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}
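
/*
 * Illustrative sketch (simplified, not from this file): the arm/arm64 run
 * loop in virt/kvm/arm/arm.c brackets each guest entry with these two calls,
 * roughly:
 *
 *	kvm_vgic_flush_hwstate(vcpu);	(ap_list -> list registers)
 *	... enter the guest ...
 *	kvm_vgic_sync_hwstate(vcpu);	(list registers -> ap_list, prune)
 *
 * so the LRs are repopulated from the ap_list on every entry and folded back
 * (and pruned) on every exit.
 */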

void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_load(vcpu);
	else
		vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_put(vcpu);
	else
		vgic_v3_put(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled;
		spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);

	return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
	}
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
	bool map_is_active;

	if (!vgic_initialized(vcpu->kvm))
		return false;

	spin_lock(&irq->irq_lock);
	map_is_active = irq->hw && irq->active;
	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}