/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

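/*
 * Trivial accessors for registers that are read-as-zero, read-as-one or
 * write-ignored; the per-model register tables wire these up wherever the
 * architecture mandates RAZ/RAO/WI behaviour.
 */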
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

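/*
 * GICD_ISENABLER: a bit written as one enables the corresponding interrupt.
 * Enabling may make a pending interrupt deliverable, so each IRQ is handed
 * to vgic_queue_irq_unlock(), which queues it on a VCPU if necessary and
 * drops the lock.
 */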
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

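/*
 * GICD_ICENABLER: a bit written as one disables the corresponding
 * interrupt; zero bits are ignored, which for_each_set_bit() gives us
 * for free.
 */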
void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

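/*
 * As with the enable bits, read accesses to both GICD_ISPENDR and
 * GICD_ICPENDR return the pending state, so one accessor serves both.
 */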
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq_is_pending(irq))
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/*
 * Return the VCPU that performed the MMIO access and trapped from within
 * the VM, or NULL if this is a userspace access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again,
 * because even if the current thread is migrated to another CPU, reading
 * the per-CPU value later will give us the same value as we update the
 * per-CPU variable in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
	struct kvm_vcpu *vcpu;

	preempt_disable();
	vcpu = kvm_arm_get_running_vcpu();
	preempt_enable();
	return vcpu;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = true;
	vgic_irq_set_phys_active(irq, true);
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw)
			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

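/*
 * Read accesses to both GICD_ISACTIVER and GICD_ICACTIVER return the
 * active state, so the read side is shared as well.
 */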
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

	spin_lock_irqsave(&irq->irq_lock, flags);

	/*
	 * If this virtual IRQ was written into a list register, we
	 * have to make sure the CPU that runs the VCPU thread has
	 * synced back the LR state to the struct vgic_irq.
	 *
	 * As long as the conditions below are true, we know the VCPU thread
	 * may be on its way back from the guest (we kicked the VCPU thread in
	 * vgic_change_active_prepare) and still has to sync back this IRQ,
	 * so we release and re-acquire the spin_lock to let the other thread
	 * sync back the IRQ.
	 *
	 * When accessing VGIC state from user space, requester_vcpu is
	 * NULL, which is fine, because we guarantee that no VCPUs are running
	 * when accessing VGIC state from user space so irq->vcpu->cpu is
	 * always -1.
	 */
	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
	       irq->vcpu->cpu != -1) /* VCPU thread is running */
		cond_resched_lock(&irq->irq_lock);

	if (irq->hw)
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	else
		irq->active = active;

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	/* The first shared interrupt ID is VGIC_NR_PRIVATE_IRQS itself */
	if (intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

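/*
 * The guest-facing active-state writers below halt the guest around the
 * update (see vgic_change_active_prepare); the uaccess variants rely on
 * userspace having stopped all VCPUs already, so they skip the
 * prepare/finish dance.
 */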
static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
}

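/*
 * Priority registers use one byte per interrupt, so a single 32-bit
 * access covers four interrupts; VGIC_ADDR_TO_INTID() is therefore
 * called with 8 bits per interrupt and the loops below are byte-wise.
 */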
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

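/*
 * GICD_ICFGR encodes two bits per interrupt; bit 1 of each field is one
 * for edge-triggered and zero for level-sensitive configuration, so a
 * 32-bit access covers 16 interrupts.
 */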
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general;
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

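/*
 * Helpers for userspace save/restore of the line level of level-triggered
 * interrupts, 32 interrupts per access; SGIs (which have no line level)
 * and unallocated interrupt IDs are skipped.
 */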
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * The line level is set irrespective of the IRQ type
		 * (level or edge), so that restoring the line level does
		 * not depend on the IRQ configuration having been
		 * restored first.
		 */
		new_level = !!(val & (1U << i));
		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

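/*
 * bsearch() comparator for register lookup; the regions array must be
 * sorted by ascending reg_offset for the search to work.
 */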
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format to deal with it as a
 * data value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

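/*
 * Validate an access against a region: the width must be one the region
 * advertises, the address must be naturally aligned, and for per-IRQ
 * registers the decoded interrupt ID must be allocated.
 */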
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
	else
		region->write(r_vcpu, addr, sizeof(u32), *val);

	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

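/*
 * MMIO dispatch callbacks hooked up to the KVM I/O bus: look up the
 * register region, pick the target VCPU based on the device type
 * (redistributor frames are per-VCPU) and convert between the bus
 * (little-endian) representation and host byte order.
 */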
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

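/*
 * Register the distributor frame on the KVM MMIO bus; the per-model init
 * helper fills in the register table and returns the length of the
 * region to register.
 */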
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}