/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

struct vgic_global kvm_vgic_global_state __ro_after_init = {
	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
 *       vgic_cpu->ap_list_lock	must be taken with IRQs disabled
 *         kvm->lpi_list_lock	must be taken with IRQs disabled
 *           vgic_irq->irq_lock	must be taken with IRQs disabled
 *
 * As the ap_list_lock might be taken from the timer interrupt handler,
 * we have to disable IRQs before taking this lock and everything lower
 * than it.
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 *
 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
 * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
 * spinlocks for any lock that may be taken while injecting an interrupt.
 */

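/*
 * Illustrative sketch (not part of the original file): upgrading from the
 * irq_lock to an ap_list_lock has to follow the drop-and-retake rule above,
 * re-validating any state that was sampled before the unlock:
 *
 *	spin_unlock_irqrestore(&irq->irq_lock, flags);
 *	spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 *	spin_lock(&irq->irq_lock);
 *	// re-check irq->vcpu and the target here, and retry if they changed
 *
 * vgic_queue_irq_unlock() below is the canonical user of this pattern.
 */
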
/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (irq->intid != intid)
			continue;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() later once it's finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);
		goto out_unlock;
	}
	irq = NULL;

out_unlock:
	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE) {
		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
		return &vcpu->arch.vgic_cpu.private_irqs[intid];
	}

	/* SPIs */
	if (intid <= VGIC_MAX_SPI) {
		intid = array_index_nospec(intid, VGIC_MAX_SPI);
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
	}

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}

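/*
 * Typical get/put pairing (illustrative, mirroring callers such as
 * kvm_vgic_inject_irq() below):
 *
 *	struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, intid);
 *	if (!irq)
 *		return -EINVAL;
 *	// ...operate on irq, usually under irq->irq_lock...
 *	vgic_put_irq(kvm, irq);
 */
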
/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned long flags;

	if (irq->intid < VGIC_MIN_LPI)
		return;

	spin_lock_irqsave(&dist->lpi_list_lock, flags);
	if (!kref_put(&irq->refcount, vgic_irq_release)) {
		spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
		return;
	}

	list_del(&irq->lpi_list);
	dist->lpi_list_count--;
	spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	kfree(irq);
}

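/*
 * Note: only LPIs are dynamically allocated and therefore refcounted.
 * vgic_put_irq() returns early for SGIs/PPIs/SPIs (intid < VGIC_MIN_LPI),
 * whose vgic_irq structures live in arrays allocated once with the VCPU or
 * the distributor and are never freed individually.
 */
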
/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/*
	 * If the IRQ is neither active nor both pending and enabled, it
	 * should not be queued to any VCPU.
	 */
	return NULL;
}

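/*
 * Decision summary for vgic_target_oracle() (descriptive of the logic
 * above):
 *
 *	active			-> irq->vcpu (or target_vcpu if unqueued)
 *	enabled && pending	-> irq->target_vcpu, or NULL if the
 *				   distributor is disabled
 *	anything else		-> NULL (don't queue anywhere)
 */
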
/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	spin_lock(&irqa->irq_lock);
	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	spin_unlock(&irqb->irq_lock);
	spin_unlock(&irqa->irq_lock);
	return ret;
}

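/*
 * Example (illustrative): given ap_list entries {pending/prio 0xa0,
 * active, pending/prio 0x20}, the comparator above sorts them as
 * {active, pending/prio 0x20, pending/prio 0xa0}: active interrupts
 * first, then pending ones by ascending priority value (a lower value
 * means a higher priority on the GIC).
 */
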
/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * An injection is only valid if it either changes the level of a
 * level-triggered IRQ or represents a rising edge. In addition, in-kernel
 * connected IRQ lines can only be controlled by their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
	if (irq->owner != owner)
		return false;

	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}

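/*
 * Validation summary (descriptive), assuming irq->owner matches:
 *
 *	config	level	line_level	valid injection?
 *	LEVEL	1	0		yes (line goes high)
 *	LEVEL	0	1		yes (line goes low)
 *	LEVEL	n	n		no (level unchanged)
 *	EDGE	1	n/a		yes (rising edge)
 *	EDGE	0	n/a		no
 */
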
/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags)
{
	struct kvm_vcpu *vcpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not both pending and enabled, it
		 * does not need to be inserted into an ap_list and there is
		 * also no more work for us to do.
		 */
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* someone can do stuff here, which we re-check below */

	spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
	spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */

	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		spin_unlock(&irq->irq_lock);
		spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

		spin_lock_irqsave(&irq->irq_lock, flags);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	spin_unlock(&irq->irq_lock);
	spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return true;
}

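/*
 * Caller pattern (illustrative): the caller takes the irq_lock, updates
 * the pending state, then lets vgic_queue_irq_unlock() drop every lock on
 * the way out, exactly as kvm_vgic_inject_irq() does below:
 *
 *	spin_lock_irqsave(&irq->irq_lock, flags);
 *	irq->pending_latch = true;
 *	vgic_queue_irq_unlock(kvm, irq, flags);
 */
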
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:	The VM structure pointer
 * @cpuid:	The CPU for PPIs
 * @intid:	The INTID to inject a new state to.
 * @level:	Edge-triggered:  true:  to trigger the interrupt
 *			         false: to ignore the call
 *		Level-sensitive: true:  raise the input signal
 *			         false: lower the input signal
 * @owner:	The opaque pointer to the owner of the IRQ being raised to
 *		verify that the caller is allowed to inject this IRQ.
 *		Userspace injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	unsigned long flags;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	spin_lock_irqsave(&irq->irq_lock, flags);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to see here, move along... */
		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	vgic_queue_irq_unlock(kvm, irq, flags);
	vgic_put_irq(kvm, irq);

	return 0;
}

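/*
 * Illustrative use of kvm_vgic_inject_irq(), modelled on in-kernel users
 * such as the arch timer raising its PPI:
 *
 *	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, intid, true, owner);
 *
 * Userspace-originated injections (e.g. KVM_IRQ_LINE) pass owner == NULL.
 */
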
/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
			    unsigned int host_irq)
{
	struct irq_desc *desc;
	struct irq_data *data;

	/*
	 * Find the physical IRQ number corresponding to @host_irq
	 */
	desc = irq_to_desc(host_irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return -EINVAL;
	}
	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	irq->hw = true;
	irq->host_irq = host_irq;
	irq->hwintid = data->hwirq;
	return 0;
}

/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
	irq->hw = false;
	irq->hwintid = 0;
}

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;
	int ret;

	BUG_ON(!irq);

	spin_lock_irqsave(&irq->irq_lock, flags);
	ret = kvm_vgic_map_irq(vcpu, irq, host_irq);
	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return ret;
}

/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt. Kernel
 * subsystems injecting mapped interrupts should reset their interrupt lines
 * when we are doing a reset of the VM.
 */
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;

	if (!irq->hw)
		goto out;

	spin_lock_irqsave(&irq->irq_lock, flags);
	irq->active = false;
	irq->pending_latch = false;
	irq->line_level = false;
	spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
	vgic_put_irq(vcpu->kvm, irq);
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	BUG_ON(!irq);

	spin_lock_irqsave(&irq->irq_lock, flags);
	kvm_vgic_unmap_irq(irq);
	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

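/*
 * Descriptive note: kvm_vgic_map_phys_irq() and kvm_vgic_unmap_phys_irq()
 * bracket the lifetime of a forwarded interrupt. While irq->hw is set, the
 * LR entry carries the HW bit and irq->hwintid, so a guest EOI of the
 * virtual interrupt also deactivates the physical one without trapping
 * back to the host.
 */
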
/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:	Pointer to the VCPU (used for PPIs)
 * @intid:	The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:	Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
	struct vgic_irq *irq;
	unsigned long flags;
	int ret = 0;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	/* SGIs and LPIs cannot be wired up to any device */
	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
		return -EINVAL;

	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
	spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->owner && irq->owner != owner)
		ret = -EEXIST;
	else
		irq->owner = owner;
	spin_unlock_irqrestore(&irq->irq_lock, flags);

	return ret;
}

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;
	unsigned long flags;

retry:
	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
		bool target_vcpu_needs_kick = false;

		spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		spin_unlock(&irq->irq_lock);
		spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				 SINGLE_DEPTH_NESTING);
		spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
			target_vcpu_needs_kick = true;
		}

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);

		if (target_vcpu_needs_kick) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
			kvm_vcpu_kick(target_vcpu);
		}

		goto retry;
	}

	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_clear_lr(vcpu, lr);
	else
		vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_underflow(vcpu);
	else
		vgic_v3_set_underflow(vcpu);
}

static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_npie(vcpu);
	else
		vgic_v3_set_npie(vcpu);
}

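/*
 * The thin wrappers above dispatch on kvm_vgic_global_state.type at
 * runtime, so the common VGIC code below stays agnostic of whether the
 * host has a GICv2 or a GICv3 virtual CPU interface.
 */
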
/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
				 bool *multi_sgi)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	*multi_sgi = false;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		if (vgic_irq_is_sgi(irq->intid) && irq->source) {
			int w = hweight8(irq->source);

			count += w;
			*multi_sgi |= (w > 1);
		} else {
			count++;
		}
		spin_unlock(&irq->irq_lock);
	}
	return count;
}

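/*
 * Example (illustrative): a GICv2 SGI with irq->source == 0x05, i.e.
 * pending requests from CPU0 and CPU2, contributes hweight8(0x05) == 2 to
 * the depth and sets *multi_sgi, because each source CPU needs its own LR
 * entry.
 */
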
/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count;
	bool npie = false;
	bool multi_sgi;
	u8 prio = 0xff;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	count = compute_ap_list_depth(vcpu, &multi_sgi);
	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
		vgic_sort_ap_list(vcpu);

	count = 0;

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);

		/*
		 * If we have multi-SGIs in the pipeline, we need to
		 * guarantee that they are all seen before any IRQ of
		 * lower priority. In that case, we need to filter out
		 * these interrupts by exiting early. This is easy as
		 * the AP list has been sorted already.
		 */
		if (multi_sgi && irq->priority > prio) {
			spin_unlock(&irq->irq_lock);
			break;
		}

		if (likely(vgic_target_oracle(irq) == vcpu)) {
			vgic_populate_lr(vcpu, irq, count++);

			if (irq->source) {
				npie = true;
				prio = irq->priority;
			}
		}

		spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
					  &vgic_cpu->ap_list_head))
				vgic_set_underflow(vcpu);
			break;
		}
	}

	if (npie)
		vgic_set_npie(vcpu);

	vcpu->arch.vgic_cpu.used_lrs = count;

	/* Nuke remaining LRs */
	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
		vgic_clear_lr(vcpu, count);
}

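/*
 * Overflow handling (descriptive): if the ap_list holds more deliverable
 * interrupts than there are LRs, vgic_set_underflow() arms the underflow
 * maintenance interrupt so that the guest exits once the LRs start
 * draining and the remaining ap_list entries get their turn.
 */
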
/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	WARN_ON(vgic_v4_sync_hwstate(vcpu));

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	if (vgic_cpu->used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	WARN_ON(vgic_v4_flush_hwstate(vcpu));

	/*
	 * If there are no virtual interrupts active or pending for this
	 * VCPU, then there is no work to do and we can bail out without
	 * taking any lock. There is a potential race with someone injecting
	 * interrupts to the VCPU, but it is a benign race as the VCPU will
	 * either observe the new interrupt before or after doing this check,
	 * and introducing an additional synchronization mechanism doesn't
	 * change this.
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}

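/*
 * Run-loop ordering (illustrative sketch of how the arch code drives the
 * two hooks above around a guest run):
 *
 *	kvm_vgic_flush_hwstate(vcpu);	// ap_list -> LRs, before guest entry
 *	... run the guest ...
 *	kvm_vgic_sync_hwstate(vcpu);	// LRs -> ap_list, after guest exit
 */
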
void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_load(vcpu);
	else
		vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
	if (unlikely(!vgic_initialized(vcpu->kvm)))
		return;

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_put(vcpu);
	else
		vgic_v3_put(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;
	unsigned long flags;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
		return true;

	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled;
		spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

	return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
	}
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	bool map_is_active;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return false;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	spin_lock_irqsave(&irq->irq_lock, flags);
	map_is_active = irq->hw && irq->active;
	spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}