/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif
struct vgic_global kvm_vgic_global_state __ro_after_init = {
	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};
/*
 * Locking order is always:
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
 *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
 *         kvm->lpi_list_lock		must be taken with IRQs disabled
 *           vgic_irq->irq_lock		must be taken with IRQs disabled
 *
 * As the ap_list_lock might be taken from the timer interrupt handler,
 * we have to disable IRQs before taking this lock and everything lower
 * than it.
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 *
 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
 * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
 * spinlocks for any lock that may be taken while injecting an interrupt.
 */
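/*
 * Illustration only (not part of the original file): a minimal sketch of the
 * two-vCPU ap_list_lock ordering described above. The helper name
 * vgic_lock_two_ap_lists() is hypothetical; vgic_prune_ap_list() later in
 * this file open-codes the same pattern when migrating an interrupt.
 */
#if 0
static void vgic_lock_two_ap_lists(struct kvm_vcpu *x, struct kvm_vcpu *y)
{
	/* Always take the lowest numbered VCPU's ap_list_lock first. */
	struct kvm_vcpu *first = x->vcpu_id < y->vcpu_id ? x : y;
	struct kvm_vcpu *second = x->vcpu_id < y->vcpu_id ? y : x;

	spin_lock(&first->arch.vgic_cpu.ap_list_lock);
	spin_lock_nested(&second->arch.vgic_cpu.ap_list_lock,
			 SINGLE_DEPTH_NESTING);
}
#endif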
/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
			continue;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() later once it's finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);
		goto out_unlock;
	}
	irq = NULL;

out_unlock:
	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}
/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE) {
		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
		return &vcpu->arch.vgic_cpu.private_irqs[intid];
	}

	/* SPIs */
	if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
		intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis +
					   VGIC_NR_PRIVATE_IRQS);
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
	}

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}
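/*
 * Illustration only (not part of the original file): the get/put refcount
 * contract in practice. The helper name example_poke_irq() is made up; the
 * pattern is the one every caller in this file follows.
 */
#if 0
static void example_poke_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid)
{
	struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, intid); /* takes a ref */
	unsigned long flags;

	spin_lock_irqsave(&irq->irq_lock, flags);
	/* ... inspect or modify the IRQ state under its lock ... */
	spin_unlock_irqrestore(&irq->irq_lock, flags);

	vgic_put_irq(kvm, irq);		/* drops the reference taken above */
}
#endif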
/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned long flags;

	/* SGIs/PPIs/SPIs are statically allocated; only LPIs are refcounted */
	if (irq->intid < VGIC_MIN_LPI)
		return;

	spin_lock_irqsave(&dist->lpi_list_lock, flags);
	if (!kref_put(&irq->refcount, vgic_irq_release)) {
		spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
		return;
	}

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;
	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	/* The last reference is gone; free the LPI here, not in the release */
	kfree(irq);
}
/**
 * kvm_vgic_target_oracle - compute the target vcpu for an irq
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/*
	 * If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}
/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	spin_lock(&irqa->irq_lock);
	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	spin_unlock(&irqb->irq_lock);
	spin_unlock(&irqa->irq_lock);
	return ret;
}
/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}
/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
	if (irq->owner != owner)
		return false;

	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}
/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags)
{
	struct kvm_vcpu *vcpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* someone can do stuff here, which we re-check below */

	spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
	spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */
	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		spin_unlock(&irq->irq_lock);
		spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

		spin_lock_irqsave(&irq->irq_lock, flags);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	spin_unlock(&irq->irq_lock);
	spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return true;
}
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  raise the input signal
 *			      false: lower the input signal
 * @owner:   The opaque pointer to the owner of the IRQ being raised to verify
 *           that the caller is allowed to inject this IRQ.  Userspace
 *           injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	unsigned long flags;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	spin_lock_irqsave(&irq->irq_lock, flags);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to see here, move along... */
		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	vgic_queue_irq_unlock(kvm, irq, flags);
	vgic_put_irq(kvm, irq);

	return 0;
}
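/*
 * Illustration only (not part of the original file): how an emulated
 * level-sensitive device line maps onto kvm_vgic_inject_irq(). The INTID is
 * a placeholder; owner == NULL models a userspace injection as documented
 * above.
 */
#if 0
	/* Raise SPI 40 (level-sensitive): it stays pending until lowered. */
	kvm_vgic_inject_irq(kvm, 0, 40, true, NULL);
	/* Lower it again once the emulated device deasserts its line. */
	kvm_vgic_inject_irq(kvm, 0, 40, false, NULL);
#endif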
/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
			    unsigned int host_irq)
{
	struct irq_desc *desc;
	struct irq_data *data;

	/*
	 * Find the physical IRQ number corresponding to @host_irq
	 */
	desc = irq_to_desc(host_irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return -EINVAL;
	}

	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	irq->hw = true;
	irq->host_irq = host_irq;
	irq->hwintid = data->hwirq;
	return 0;
}
/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
	irq->hw = false;
	irq->hwintid = 0;
}
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&irq->irq_lock, flags);
	ret = kvm_vgic_map_irq(vcpu, irq, host_irq);
	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return ret;
}
/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt. Kernel
 * subsystems injecting mapped interrupts should reset their interrupt lines
 * when we are doing a reset of the VM.
 */
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;

	spin_lock_irqsave(&irq->irq_lock, flags);
	irq->active = false;
	irq->pending_latch = false;
	irq->line_level = false;
	spin_unlock_irqrestore(&irq->irq_lock, flags);

	vgic_put_irq(vcpu->kvm, irq);
}
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);

	spin_lock_irqsave(&irq->irq_lock, flags);
	kvm_vgic_unmap_irq(irq);
	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}
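/*
 * Illustration only (not part of the original file): the typical map/unmap
 * lifecycle for a forwarded interrupt, as used by in-kernel subsystems such
 * as the arch timer. host_irq and vintid are placeholders.
 */
#if 0
	/* Forward host_irq into the guest as virtual INTID vintid... */
	ret = kvm_vgic_map_phys_irq(vcpu, host_irq, vintid);
	/* ... and tear the mapping down again when finished. */
	if (!ret)
		kvm_vgic_unmap_phys_irq(vcpu, vintid);
#endif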
/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:   Pointer to the VCPU (used for PPIs)
 * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:  Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
	struct vgic_irq *irq;
	unsigned long flags;
	int ret = 0;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	/* SGIs and LPIs cannot be wired up to any device */
	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
		return -EINVAL;

	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
	spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->owner && irq->owner != owner)
		ret = -EEXIST;
	else
		irq->owner = owner;
	spin_unlock_irqrestore(&irq->irq_lock, flags);

	return ret;
}
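/*
 * Illustration only (not part of the original file): an in-kernel device
 * claiming a PPI so that nobody else, including userspace, can inject it.
 * The PPI number and the example_dev owner cookie are placeholders.
 */
#if 0
	static int example_dev;	/* any unique kernel address works as owner */

	if (kvm_vgic_set_owner(vcpu, 27, &example_dev))
		return -EBUSY;	/* INTID already claimed by someone else */
	/* From now on, inject with owner == &example_dev. */
	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, 27, true, &example_dev);
#endif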
/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;
	unsigned long flags;

retry:
	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
		bool target_vcpu_needs_kick = false;

		spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		spin_unlock(&irq->irq_lock);
		spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				 SINGLE_DEPTH_NESTING);
		spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
			target_vcpu_needs_kick = true;
		}

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);

		if (target_vcpu_needs_kick) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
			kvm_vcpu_kick(target_vcpu);
		}

		goto retry;
	}

	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}

static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_npie(vcpu);
	else
		vgic_v3_set_npie(vcpu);
}
/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
				 bool *multi_sgi)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	*multi_sgi = false;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		if (vgic_irq_is_sgi(irq->intid) && irq->source) {
			int w = hweight8(irq->source);

			count += w;
			*multi_sgi |= (w > 1);
		} else {
			count++;
		}
		spin_unlock(&irq->irq_lock);
	}
	return count;
}
/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count;
	bool multi_sgi;
	u8 prio = 0xff;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	count = compute_ap_list_depth(vcpu, &multi_sgi);
	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
		vgic_sort_ap_list(vcpu);

	count = 0;

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);

		/*
		 * If we have multi-SGIs in the pipeline, we need to
		 * guarantee that they are all seen before any IRQ of
		 * lower priority. In that case, we need to filter out
		 * these interrupts by exiting early. This is easy as
		 * the AP list has been sorted already.
		 */
		if (multi_sgi && irq->priority > prio) {
			spin_unlock(&irq->irq_lock);
			break;
		}

		if (likely(vgic_target_oracle(irq) == vcpu)) {
			vgic_populate_lr(vcpu, irq, count++);

			if (irq->source)
				prio = irq->priority;
		}

		spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
					  &vgic_cpu->ap_list_head))
				vgic_set_underflow(vcpu);
			break;
		}
	}

	vcpu->arch.vgic_cpu.used_lrs = count;

	/* Nuke remaining LRs */
	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
		vgic_clear_lr(vcpu, count);
}
/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	WARN_ON(vgic_v4_sync_hwstate(vcpu));

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	if (vgic_cpu->used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}
/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	WARN_ON(vgic_v4_flush_hwstate(vcpu));

	/*
	 * If there are no virtual interrupts active or pending for this
	 * VCPU, then there is no work to do and we can bail out without
	 * taking any lock.  There is a potential race with someone injecting
	 * interrupts to the VCPU, but it is a benign race as the VCPU will
	 * either observe the new interrupt before or after doing this check,
	 * and introducing additional synchronization mechanism doesn't change
	 * this.
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}
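/*
 * Illustration only (not part of the original file): where flush and sync
 * sit relative to guest entry. This is a simplified sketch of the ordering
 * in the vcpu run loop, not the actual kvm_arch_vcpu_ioctl_run() code.
 */
#if 0
	local_irq_disable();
	kvm_vgic_flush_hwstate(vcpu);	/* ap_list -> LRs */
	/* ... enter the guest ... */
	kvm_vgic_sync_hwstate(vcpu);	/* LRs -> ap_list, then prune */
	local_irq_enable();
#endif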
void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_load(vcpu);
	else
		vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_put(vcpu);
	else
		vgic_v3_put(vcpu);
}
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;
	unsigned long flags;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
		return true;

	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled;
		spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

	return pending;
}
void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
	}
}
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	bool map_is_active;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return false;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	spin_lock_irqsave(&irq->irq_lock, flags);
	map_is_active = irq->hw && irq->active;
	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}