/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

struct vgic_global kvm_vgic_global_state __ro_after_init = {
        .gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order is always:
 * its->cmd_lock (mutex)
 * its->its_lock (mutex)
 * vgic_cpu->ap_list_lock
 * kvm->lpi_list_lock
 * vgic_irq->irq_lock
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 * vcpuX->vcpu_id < vcpuY->vcpu_id:
 *   spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *   spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 */
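
/*
 * A minimal sketch of the drop-and-re-acquire rule above (illustrative
 * only, not code from this file): a path that holds an irq_lock and then
 * needs the higher-ranking ap_list_lock must back off first:
 *
 *        spin_unlock(&irq->irq_lock);
 *        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
 *        spin_lock(&irq->irq_lock);
 *        // anything sampled under the old critical section must be re-checked
 *
 * vgic_queue_irq_unlock() below follows exactly this pattern.
 */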

/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq = NULL;

        spin_lock(&dist->lpi_list_lock);

        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                if (irq->intid != intid)
                        continue;

                /*
                 * This increases the refcount, the caller is expected to
                 * call vgic_put_irq() later once it's finished with the IRQ.
                 */
                vgic_get_irq_kref(irq);
                goto out_unlock;
        }
        irq = NULL;

out_unlock:
        spin_unlock(&dist->lpi_list_lock);

        return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid)
{
        /* SGIs and PPIs */
        if (intid <= VGIC_MAX_PRIVATE)
                return &vcpu->arch.vgic_cpu.private_irqs[intid];

        /* SPIs */
        if (intid <= VGIC_MAX_SPI)
                return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];

        /* LPIs */
        if (intid >= VGIC_MIN_LPI)
                return vgic_get_lpi(kvm, intid);

        WARN(1, "Looking up struct vgic_irq for reserved INTID");
        return NULL;
}
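
/*
 * A minimal usage sketch (hypothetical caller): every successful
 * vgic_get_irq() must be balanced with a vgic_put_irq() once the
 * reference is no longer needed:
 *
 *        struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, intid);
 *
 *        if (irq) {
 *                spin_lock(&irq->irq_lock);
 *                // ... inspect or modify the irq state ...
 *                spin_unlock(&irq->irq_lock);
 *                vgic_put_irq(kvm, irq);
 *        }
 *
 * kvm_vgic_inject_irq() below is an in-file example of this pattern.
 */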

/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        if (irq->intid < VGIC_MIN_LPI)
                return;

        spin_lock(&dist->lpi_list_lock);
        if (!kref_put(&irq->refcount, vgic_irq_release)) {
                spin_unlock(&dist->lpi_list_lock);
                return;
        }

        list_del(&irq->lpi_list);
        dist->lpi_list_count--;
        spin_unlock(&dist->lpi_list_lock);

        kfree(irq);
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq: The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

        /* If the interrupt is active, it must stay on the current vcpu */
        if (irq->active)
                return irq->vcpu ? : irq->target_vcpu;

        /*
         * If the IRQ is not active but enabled and pending, we should direct
         * it to its configured target VCPU.
         * If the distributor is disabled, pending interrupts shouldn't be
         * forwarded.
         */
        if (irq->enabled && irq_is_pending(irq)) {
                if (unlikely(irq->target_vcpu &&
                             !irq->target_vcpu->kvm->arch.vgic.enabled))
                        return NULL;

                return irq->target_vcpu;
        }

        /*
         * If neither active nor (pending and enabled), then this IRQ
         * should not be queued to any VCPU.
         */
        return NULL;
}
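
/*
 * For illustration, the oracle collapses to this decision table (a
 * restatement of the code above, not an independent specification):
 *
 *        active | enabled && pending | dist enabled | result
 *        -------+-------------------+--------------+------------------------------
 *          yes  |        any        |     any      | irq->vcpu ?: irq->target_vcpu
 *          no   |        yes        |     yes      | irq->target_vcpu
 *          no   |        yes        |     no       | NULL (not forwarded)
 *          no   |        no         |     any      | NULL (nothing to queue)
 */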

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
        struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
        bool penda, pendb;
        int ret;

        spin_lock(&irqa->irq_lock);
        spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

        if (irqa->active || irqb->active) {
                ret = (int)irqb->active - (int)irqa->active;
                goto out;
        }

        penda = irqa->enabled && irq_is_pending(irqa);
        pendb = irqb->enabled && irq_is_pending(irqb);

        if (!penda || !pendb) {
                ret = (int)pendb - (int)penda;
                goto out;
        }

        /* Both pending and enabled, sort by priority */
        ret = irqa->priority - irqb->priority;
out:
        spin_unlock(&irqb->irq_lock);
        spin_unlock(&irqa->irq_lock);
        return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}
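
/*
 * For illustration, a possible ap_list after sorting (recall that on the
 * GIC a lower numerical priority value means a more urgent interrupt):
 *
 *        [active] -> [active] -> [pend, prio 0x20] -> [pend, prio 0xa0] -> [disabled]
 */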

/*
 * An injection is only valid if it changes the line level of a
 * level-triggered IRQ, or if it is a rising edge on an edge-triggered one.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level)
{
        switch (irq->config) {
        case VGIC_CONFIG_LEVEL:
                return irq->line_level != level;
        case VGIC_CONFIG_EDGE:
                return level;
        }

        return false;
}
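
/*
 * Spelled out (illustration only):
 *
 *        edge-triggered,  level == true         -> valid (rising edge)
 *        edge-triggered,  level == false        -> ignored
 *        level-sensitive, line_level changes    -> valid (raise or lower)
 *        level-sensitive, line_level unchanged  -> ignored
 */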

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
{
        struct kvm_vcpu *vcpu;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
        vcpu = vgic_target_oracle(irq);
        if (irq->vcpu || !vcpu) {
                /*
                 * If this IRQ is already on a VCPU's ap_list, then it
                 * cannot be moved or modified and there is no more work for
                 * us to do.
                 *
                 * Otherwise, if the irq is not pending and enabled, it does
                 * not need to be inserted into an ap_list and there is also
                 * no more work for us to do.
                 */
                spin_unlock(&irq->irq_lock);

                /*
                 * We have to kick the VCPU here, because we could be
                 * queueing an edge-triggered interrupt for which we
                 * get no EOI maintenance interrupt. In that case,
                 * while the IRQ is already on the VCPU's AP list, the
                 * VCPU could have EOI'ed the original interrupt and
                 * won't see this one until it exits for some other
                 * reason.
                 */
                if (vcpu)
                        kvm_vcpu_kick(vcpu);
                return false;
        }

        /*
         * We must unlock the irq lock to take the ap_list_lock where
         * we are going to insert this new pending interrupt.
         */
        spin_unlock(&irq->irq_lock);

        /* Someone can race with us here and change the IRQ state; we re-check below. */

        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
        spin_lock(&irq->irq_lock);

        /*
         * Did something change behind our backs?
         *
         * There are two cases:
         * 1) The irq lost its pending state or was disabled behind our
         *    backs and/or it was queued to another VCPU's ap_list.
         * 2) Someone changed the affinity on this irq behind our
         *    backs and we are now holding the wrong ap_list_lock.
         *
         * In both cases, drop the locks and retry.
         */

        if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
                spin_unlock(&irq->irq_lock);
                spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

                spin_lock(&irq->irq_lock);
                goto retry;
        }

        /*
         * Grab a reference to the irq to reflect the fact that it is
         * now in the ap_list.
         */
        vgic_get_irq_kref(irq);
        list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
        irq->vcpu = vcpu;

        spin_unlock(&irq->irq_lock);
        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

        kvm_vcpu_kick(vcpu);

        return true;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:   The VM structure pointer
 * @cpuid: The CPU for PPIs
 * @intid: The INTID to inject a new state to.
 * @level: Edge-triggered:  true:  to trigger the interrupt
 *                          false: to ignore the call
 *         Level-sensitive  true:  raise the input signal
 *                          false: lower the input signal
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
                        bool level)
{
        struct kvm_vcpu *vcpu;
        struct vgic_irq *irq;
        int ret;

        trace_vgic_update_irq_pending(cpuid, intid, level);

        ret = vgic_lazy_init(kvm);
        if (ret)
                return ret;

        vcpu = kvm_get_vcpu(kvm, cpuid);
        if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
                return -EINVAL;

        irq = vgic_get_irq(kvm, vcpu, intid);
        if (!irq)
                return -EINVAL;

        spin_lock(&irq->irq_lock);

        if (!vgic_validate_injection(irq, level)) {
                /* Nothing to see here, move along... */
                spin_unlock(&irq->irq_lock);
                vgic_put_irq(kvm, irq);
                return 0;
        }

        if (irq->config == VGIC_CONFIG_LEVEL)
                irq->line_level = level;
        else
                irq->pending_latch = true;

        vgic_queue_irq_unlock(kvm, irq);
        vgic_put_irq(kvm, irq);

        return 0;
}
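
/*
 * A usage sketch (hypothetical device emulation; INTID 40 is an arbitrary
 * SPI chosen for illustration):
 *
 *        kvm_vgic_inject_irq(kvm, 0, 40, true);   // assert the line
 *        ...
 *        kvm_vgic_inject_irq(kvm, 0, 40, false);  // de-assert it again
 *
 * Note that cpuid only matters for private interrupts (SGIs/PPIs); SPIs
 * are routed by whatever affinity the guest has programmed.
 */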

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
{
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);

        BUG_ON(!irq);

        spin_lock(&irq->irq_lock);

        irq->hw = true;
        irq->hwintid = phys_irq;

        spin_unlock(&irq->irq_lock);
        vgic_put_irq(vcpu->kvm, irq);

        return 0;
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
        struct vgic_irq *irq;

        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;

        irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
        BUG_ON(!irq);

        spin_lock(&irq->irq_lock);

        irq->hw = false;
        irq->hwintid = 0;

        spin_unlock(&irq->irq_lock);
        vgic_put_irq(vcpu->kvm, irq);

        return 0;
}
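
/*
 * Setting irq->hw links the virtual interrupt to a physical one; the LR
 * for such an interrupt is programmed with its HW bit set, so a guest
 * deactivation also deactivates the physical interrupt. A sketch of the
 * pairing (the arch timer is the typical caller, mapping its hardware PPI):
 *
 *        kvm_vgic_map_phys_irq(vcpu, virt_irq, phys_irq);
 *        // ... the virtual IRQ now mirrors the physical one ...
 *        kvm_vgic_unmap_phys_irq(vcpu, virt_irq);
 */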

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;

retry:
        spin_lock(&vgic_cpu->ap_list_lock);

        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;

                spin_lock(&irq->irq_lock);

                BUG_ON(vcpu != irq->vcpu);

                target_vcpu = vgic_target_oracle(irq);

                if (!target_vcpu) {
                        /*
                         * We don't need to process this interrupt any
                         * further, move it off the list.
                         */
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        spin_unlock(&irq->irq_lock);

                        /*
                         * This vgic_put_irq call matches the
                         * vgic_get_irq_kref in vgic_queue_irq_unlock,
                         * where we added the LPI to the ap_list. As
                         * we remove the irq from the list, we also
                         * drop the refcount.
                         */
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }

                if (target_vcpu == vcpu) {
                        /* We're on the right CPU */
                        spin_unlock(&irq->irq_lock);
                        continue;
                }

                /* This interrupt looks like it has to be migrated. */

                spin_unlock(&irq->irq_lock);
                spin_unlock(&vgic_cpu->ap_list_lock);

                /*
                 * Ensure locking order by always locking the smallest
                 * ID first.
                 */
                if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
                        vcpuA = vcpu;
                        vcpuB = target_vcpu;
                } else {
                        vcpuA = target_vcpu;
                        vcpuB = vcpu;
                }

                spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
                spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
                                 SINGLE_DEPTH_NESTING);
                spin_lock(&irq->irq_lock);

                /*
                 * If the affinity has been preserved, move the
                 * interrupt around. Otherwise, it means things have
                 * changed while the interrupt was unlocked, and we
                 * need to replay this.
                 *
                 * In all cases, we cannot trust the list not to have
                 * changed, so we restart from the beginning.
                 */
                if (target_vcpu == vgic_target_oracle(irq)) {
                        struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

                        list_del(&irq->ap_list);
                        irq->vcpu = target_vcpu;
                        list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
                }

                spin_unlock(&irq->irq_lock);
                spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
                spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
                goto retry;
        }

        spin_unlock(&vgic_cpu->ap_list_lock);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_fold_lr_state(vcpu);
        else
                vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
                                    struct vgic_irq *irq, int lr)
{
        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_populate_lr(vcpu, irq, lr);
        else
                vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_clear_lr(vcpu, lr);
        else
                vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_underflow(vcpu);
        else
                vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count = 0;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);
                /* GICv2 SGIs can count for more than one... */
                if (vgic_irq_is_sgi(irq->intid) && irq->source)
                        count += hweight8(irq->source);
                else
                        count++;
                spin_unlock(&irq->irq_lock);
        }
        return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count = 0;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
                vgic_sort_ap_list(vcpu);

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);

                if (unlikely(vgic_target_oracle(irq) != vcpu))
                        goto next;

                /*
                 * If we get an SGI with multiple sources, try to get
                 * them all in at once.
                 */
                do {
                        vgic_populate_lr(vcpu, irq, count++);
                } while (irq->source && count < kvm_vgic_global_state.nr_lr);

next:
                spin_unlock(&irq->irq_lock);

                if (count == kvm_vgic_global_state.nr_lr) {
                        if (!list_is_last(&irq->ap_list,
                                          &vgic_cpu->ap_list_head))
                                vgic_set_underflow(vcpu);
                        break;
                }
        }

        vcpu->arch.vgic_cpu.used_lrs = count;

        /* Nuke remaining LRs */
        for ( ; count < kvm_vgic_global_state.nr_lr; count++)
                vgic_clear_lr(vcpu, count);
}
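
/*
 * Worked example (illustrative numbers): with 4 LRs and 6 interrupts
 * targeting this VCPU, compute_ap_list_depth() exceeds nr_lr, so the
 * ap_list is sorted and the four most urgent entries fill LR0..LR3.
 * Because entries remain on the list, underflow is enabled so that the
 * guest exits once the LRs drain and the remaining two can be flushed.
 */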

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        /* An empty ap_list_head implies used_lrs == 0 */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
                return;

        if (vgic_cpu->used_lrs)
                vgic_fold_lr_state(vcpu);
        vgic_prune_ap_list(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
        /*
         * If there are no virtual interrupts active or pending for this
         * VCPU, then there is no work to do and we can bail out without
         * taking any lock.  There is a potential race with someone injecting
         * interrupts to the VCPU, but it is a benign race as the VCPU will
         * either observe the new interrupt before or after doing this check,
         * and introducing an additional synchronization mechanism doesn't
         * change this.
         */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
                return;

        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
        vgic_flush_lr_state(vcpu);
        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}
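
/*
 * These two functions bracket each guest entry; a sketch of the caller,
 * which lives in the arch run loop rather than in this file:
 *
 *        kvm_vgic_flush_hwstate(vcpu);   // ap_list -> LRs
 *        // ... enter and run the guest ...
 *        kvm_vgic_sync_hwstate(vcpu);    // LRs -> ap_list, then prune
 */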

void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
        if (unlikely(!vgic_initialized(vcpu->kvm)))
                return;

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_load(vcpu);
        else
                vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
        if (unlikely(!vgic_initialized(vcpu->kvm)))
                return;

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_put(vcpu);
        else
                vgic_v3_put(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        bool pending = false;

        if (!vcpu->kvm->arch.vgic.enabled)
                return false;

        spin_lock(&vgic_cpu->ap_list_lock);

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);
                pending = irq_is_pending(irq) && irq->enabled;
                spin_unlock(&irq->irq_lock);

                if (pending)
                        break;
        }

        spin_unlock(&vgic_cpu->ap_list_lock);

        return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        int c;

        /*
         * We've injected an interrupt, time to find out who deserves
         * a good kick...
         */
        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (kvm_vgic_vcpu_pending_irq(vcpu))
                        kvm_vcpu_kick(vcpu);
        }
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
        bool map_is_active;

        spin_lock(&irq->irq_lock);
        map_is_active = irq->hw && irq->active;
        spin_unlock(&irq->irq_lock);
        vgic_put_irq(vcpu->kvm, irq);

        return map_is_active;
}