/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "../trace.h"

#ifdef CONFIG_DEBUG_SPINLOCK
#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
#else
#define DEBUG_SPINLOCK_BUG_ON(p)
#endif

struct vgic_global __section(.hyp.text) kvm_vgic_global_state;

/*
 * Locking order is always:
 *   vgic_cpu->ap_list_lock
 *     vgic_irq->irq_lock
 *
 * (that is, always take the ap_list_lock before the struct vgic_irq lock).
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 */

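/*
 * INTID ranges, per the GIC architecture: SGIs are 0-15 and PPIs are 16-31
 * (together the per-VCPU "private" interrupts), SPIs are 32-1019 (per-VM)
 * and LPIs start at 8192.
 */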
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE)
		return &vcpu->arch.vgic_cpu.private_irqs[intid];

	/* SPIs */
	if (intid <= VGIC_MAX_SPI)
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];

	/* LPIs are not yet covered */
	if (intid >= VGIC_MIN_LPI)
		return NULL;

	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}

/**
 * kvm_vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq->pending) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/* If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

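	/*
	 * Both irq_locks belong to the same lock class, so annotate the
	 * second acquisition with SINGLE_DEPTH_NESTING to keep lockdep
	 * from flagging it as a recursive lock.
	 */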
	spin_lock(&irqa->irq_lock);
	spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irqa->pending;
	pendb = irqb->enabled && irqb->pending;

	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	spin_unlock(&irqb->irq_lock);
	spin_unlock(&irqa->irq_lock);
	return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge.
 */
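/*
 * (For example, raising the line of a level-sensitive IRQ whose line is
 * already high changes nothing and must not be treated as a new injection.)
 */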
static bool vgic_validate_injection(struct vgic_irq *irq, bool level)
{
	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		spin_unlock(&irq->irq_lock);
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	spin_unlock(&irq->irq_lock);

	/* Someone else may change the IRQ state while we hold no locks; we re-check below. */

	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */

	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

		spin_lock(&irq->irq_lock);
		goto retry;
	}

	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	spin_unlock(&irq->irq_lock);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);

	kvm_vcpu_kick(vcpu);

	return true;
}

static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
				   unsigned int intid, bool level,
				   bool mapped_irq)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	if (irq->hw != mapped_irq)
		return -EINVAL;

	spin_lock(&irq->irq_lock);

	if (!vgic_validate_injection(irq, level)) {
		/* Nothing to see here, move along... */
		spin_unlock(&irq->irq_lock);
		return 0;
	}

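	/*
	 * Level-sensitive interrupts track the line level and stay pending
	 * as long as the line is high or the software pending latch
	 * (soft_pending) is set; edge-triggered interrupts simply latch
	 * the pending state on a rising edge.
	 */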
	if (irq->config == VGIC_CONFIG_LEVEL) {
		irq->line_level = level;
		irq->pending = level || irq->soft_pending;
	} else {
		irq->pending = true;
	}

	vgic_queue_irq_unlock(kvm, irq);

	return 0;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  raise the input signal
 *			      false: lower the input signal
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level)
{
	return vgic_update_irq_pending(kvm, cpuid, intid, level, false);
}

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

retry:
	spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;

		spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			spin_unlock(&irq->irq_lock);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				 SINGLE_DEPTH_NESTING);
		spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
		}

		spin_unlock(&irq->irq_lock);
		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		goto retry;
	}

	spin_unlock(&vgic_cpu->ap_list_lock);
}

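/*
 * The functions below are empty placeholders; the GIC-hardware specific
 * backends (GICv2/GICv3) are expected to provide the real maintenance
 * interrupt handling and LR accessors.
 */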
static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu)
{
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		if (vgic_irq_is_sgi(irq->intid) && irq->source)
			count += hweight8(irq->source);
		else
			count++;
		spin_unlock(&irq->irq_lock);
	}
	return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

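	/*
	 * If the ap_list holds more interrupts than we have LRs, request
	 * the underflow notification and sort the list so that active and
	 * highest-priority interrupts get LRs first.
	 */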
	if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) {
		vgic_set_underflow(vcpu);
		vgic_sort_ap_list(vcpu);
	}

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);

		if (unlikely(vgic_target_oracle(irq) != vcpu))
			goto next;

		/*
		 * If we get an SGI with multiple sources, try to get
		 * them in all at once.
		 */
		do {
			vgic_populate_lr(vcpu, irq, count++);
		} while (irq->source && count < kvm_vgic_global_state.nr_lr);

next:
		spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr)
			break;
	}

	vcpu->arch.vgic_cpu.used_lrs = count;

	/* Nuke remaining LRs */
	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
		vgic_clear_lr(vcpu, count);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	vgic_process_maintenance_interrupt(vcpu);
	vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
	vgic_flush_lr_state(vcpu);
	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}