/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

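/*
 * The RAZ/RAO/WI helpers above back reserved or unimplemented registers in
 * the per-model register tables (vgic-mmio-v2.c and vgic-mmio-v3.c).  A
 * table entry wiring them up looks roughly like the following sketch
 * (illustrative only; see the real tables for the exact macro arguments):
 *
 *	REGISTER_DESC_WITH_LENGTH(GIC_DIST_IGROUP,
 *		vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
 *		VGIC_ACCESS_32bit),
 */
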
/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

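/*
 * A worked example of the INTID decoding used above: VGIC_ADDR_TO_INTID()
 * (vgic-mmio.h) turns a byte offset into the first interrupt ID a register
 * covers, given the number of register bits per IRQ.  With 1 bit per IRQ,
 * a 4-byte read at offset 0x4 of the ISENABLER range yields
 * intid = 0x4 * 8 / 1 = 32 and len * 8 = 32 bits, i.e. INTIDs 32..63.
 */
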
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

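/*
 * There is deliberately no unlock on the set-enable path above:
 * vgic_queue_irq_unlock() is called with the irq_lock held, queues the
 * interrupt on a VCPU if it has become deliverable, and drops the lock
 * (restoring the saved flags) before returning.
 */
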
void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;

		spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq_is_pending(irq))
			value |= (1U << i);
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

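/*
 * Note that the pending state read above is computed, not stored:
 * irq_is_pending() (vgic.h) folds the software pending_latch together with
 * the line level for level-triggered interrupts, which is why this read
 * takes the irq_lock to snapshot both fields consistently.
 */
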
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->pending_latch = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool new_active_state)
{
	struct kvm_vcpu *requester_vcpu;
	unsigned long flags;

	spin_lock_irqsave(&irq->irq_lock, flags);

	/*
	 * The vcpu parameter here can mean multiple things depending on how
	 * this function is called; when handling a trap from the kernel it
	 * depends on the GIC version, and these functions are also called as
	 * part of save/restore from userspace.
	 *
	 * Therefore, we have to figure out the requester in a reliable way.
	 *
	 * When accessing VGIC state from user space, the requester_vcpu is
	 * NULL, which is fine, because we guarantee that no VCPUs are running
	 * when accessing VGIC state from user space so irq->vcpu->cpu is
	 * always -1.
	 */
	requester_vcpu = kvm_arm_get_running_vcpu();

	irq->active = new_active_state;
	if (new_active_state)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
}

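/*
 * The two uaccess variants above intentionally skip the kvm->lock and the
 * halt/resume dance done by their MMIO counterparts: userspace may only
 * access VGIC state while all VCPUs are stopped, so there are no list
 * registers to race against (see the comment above
 * vgic_change_active_prepare()).
 */
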
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

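/*
 * A worked example of the narrowing above, assuming VGIC_PRI_BITS is 5 as
 * defined in vgic.h: the mask is GENMASK(7, 3) == 0xf8, so a guest that
 * writes priority 0xff reads back 0xf8.  Only the top VGIC_PRI_BITS bits
 * of each priority byte are implemented, which the architecture permits.
 */
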
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

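/*
 * GICD_ICFGR packs one 2-bit field per interrupt: bit [2n + 1] set means
 * edge-triggered, clear means level-triggered, and bit [2n] is reserved.
 * That is why the read above reports 2 (binary 10) per edge interrupt and
 * the write below only tests bit (i * 2 + 1).
 */
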
void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

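/*
 * The two line_level helpers above are not guest-facing MMIO handlers:
 * they transfer a 32-interrupt bitmap per call and back the userspace
 * save/restore path for level-triggered interrupt state.
 */
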
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

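/*
 * bsearch() requires the regions array to be sorted by ascending
 * reg_offset, which is how the per-model register tables are laid out.
 * The lookup key is the offset itself, smuggled through the key pointer;
 * match_region() reports a hit for any region whose
 * [reg_offset, reg_offset + len) window contains it.
 */
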
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

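/*
 * Concretely: a guest storing the 32-bit value 0x12345678 produces the LE
 * byte sequence 0x78 0x56 0x34 0x12 on the bus.  le32_to_cpu() is a no-op
 * on a little-endian host and a byte swap on a big-endian one, so the
 * handlers always see the data value 0x12345678 either way.
 */
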
/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

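/*
 * check_region() thus rejects, for example, a 2-byte access (there is no
 * 16-bit access mode), a 4-byte access to a region that only allows
 * VGIC_ACCESS_8bit, any unaligned access, and an access that would decode
 * to an interrupt ID beyond the last allocated SPI.
 */
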
const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
	else
		region->write(r_vcpu, addr, sizeof(u32), *val);

	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

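/*
 * kvm_io_gic_ops is the vtable attached to each vgic_io_device (via
 * kvm_iodevice_init() in the per-model init helpers), so every trapped
 * guest access to a VGIC frame registered on the KVM_MMIO_BUS funnels
 * through the two dispatchers above.
 */
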
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}