/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

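/*
 * Trivial handlers for registers (or register bits) that the
 * architecture defines as RAZ (read-as-zero), RAO (read-as-one) or
 * WI (write-ignored). The per-model register tables wire these up
 * where appropriate.
 */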
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                        unsigned int len, unsigned long val)
{
        /* Ignore */
}

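/*
 * Illustrative note: for the 1-bit-per-IRQ registers handled below,
 * VGIC_ADDR_TO_INTID(addr, 1) maps the byte offset of an access to the
 * first INTID it covers. For example, a 32-bit read at offset 4 into
 * GICD_ISENABLER spans INTIDs 32..63 (4 bytes * 8 bits-per-byte /
 * 1 bit-per-IRQ = 32).
 */
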
/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->enabled)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

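/*
 * Setting the enable bit may make an already-pending interrupt
 * deliverable, so use vgic_queue_irq_unlock() to re-evaluate queueing
 * the IRQ to a VCPU; it also drops the irq_lock for us.
 */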
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->enabled = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->enabled = false;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

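/*
 * Unlike the enabled bit, the pending state is derived from more than
 * one field (irq_is_pending() checks pending_latch and, for level
 * interrupts, line_level), so take the irq_lock to get a consistent
 * snapshot.
 */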
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                unsigned long flags;

                spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq_is_pending(irq))
                        value |= (1U << i);
                spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
        return (vgic_irq_is_sgi(irq->intid) &&
                vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}

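/*
 * On a GICv2, SGIs carry per-source pending state that is only
 * accessible through GICD_SPENDSGIR/GICD_CPENDSGIR, which is why the
 * handlers below treat the SGI bits of GICD_ISPENDR0/GICD_ICPENDR0 as
 * write-ignored.
 */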
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /* GICD_ISPENDR0 SGI bits are WI */
                if (is_vgic_v2_sgi(vcpu, irq)) {
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }

                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->pending_latch = true;

                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /* GICD_ICPENDR0 SGI bits are WI */
                if (is_vgic_v2_sgi(vcpu, irq)) {
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }

                spin_lock_irqsave(&irq->irq_lock, flags);

                irq->pending_latch = false;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->active)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                    bool new_active_state)
{
        struct kvm_vcpu *requester_vcpu;
        unsigned long flags;
        spin_lock_irqsave(&irq->irq_lock, flags);

        /*
         * The vcpu parameter here can mean multiple things depending on how
         * this function is called; when handling a trap from the kernel it
         * depends on the GIC version, and these functions are also called as
         * part of save/restore from userspace.
         *
         * Therefore, we have to figure out the requester in a reliable way.
         *
         * When accessing VGIC state from user space, the requester_vcpu is
         * NULL, which is fine, because we guarantee that no VCPUs are running
         * when accessing VGIC state from user space so irq->vcpu->cpu is
         * always -1.
         */
        requester_vcpu = kvm_arm_get_running_vcpu();

        irq->active = new_active_state;
        if (new_active_state)
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
        else
                spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts' active state,
 * which guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
            intid >= VGIC_NR_PRIVATE_IRQS)
                kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
            intid >= VGIC_NR_PRIVATE_IRQS)
                kvm_arm_resume_guest(vcpu->kvm);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, false);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_cactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

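/*
 * The uaccess variants skip the halt/resume bracket: as noted above,
 * userspace accesses already require all VCPUs to be stopped, so there
 * are no LRs to race against.
 */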
void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        __vgic_mmio_write_cactive(vcpu, addr, len, val);
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, true);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_sactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        __vgic_mmio_write_sactive(vcpu, addr, len, val);
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->priority << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock_irqsave(&irq->irq_lock, flags);
                /* Narrow the priority range to what we actually support */
                irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
                spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}
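
/*
 * Illustrative example: with VGIC_PRI_BITS == 5 (the width this
 * implementation supports), GENMASK(7, 3) == 0xf8, so a guest write of
 * 0xff to a priority byte is stored, and later read back, as 0xf8.
 */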

unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        u32 value = 0;
        int i;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->config == VGIC_CONFIG_EDGE)
                        value |= (2U << (i * 2));

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}
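
/*
 * GICD_ICFGR uses a 2-bit field per interrupt: bit 1 set (0b10) means
 * edge-triggered, clear (0b00) means level-sensitive. Hence the
 * "2U << (i * 2)" above and the test_bit(i * 2 + 1, ...) check below.
 */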

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
                            gpa_t addr, unsigned int len,
                            unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq;

                /*
                 * The configuration cannot be changed for SGIs in general,
                 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
                 * code relies on PPIs being level triggered, so we also
                 * make them read-only here.
                 */
                if (intid + i < VGIC_NR_PRIVATE_IRQS)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                spin_lock_irqsave(&irq->irq_lock, flags);

                if (test_bit(i * 2 + 1, &val))
                        irq->config = VGIC_CONFIG_EDGE;
                else
                        irq->config = VGIC_CONFIG_LEVEL;

                spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
        int i;
        u64 val = 0;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
                        val |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}
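
/*
 * The line level helpers above and below sit on the userspace
 * save/restore path (e.g. the GICv3 KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO
 * device attribute) rather than behind a guest-visible register.
 */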

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
                                    const u64 val)
{
        int i;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
        unsigned long flags;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;
                bool new_level;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /*
                 * Line level is set irrespective of irq type
                 * (level or edge) so that the VM does not have to
                 * restore the irq configuration before the line
                 * level.
                 */
                new_level = !!(val & (1U << i));
                spin_lock_irqsave(&irq->irq_lock, flags);
                irq->line_level = new_level;
                if (new_level)
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                else
                        spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

static int match_region(const void *key, const void *elt)
{
        const unsigned int offset = (unsigned long)key;
        const struct vgic_register_region *region = elt;

        if (offset < region->reg_offset)
                return -1;

        if (offset >= region->reg_offset + region->len)
                return 1;

        return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
                      int nr_regions, unsigned int offset)
{
        return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
                       sizeof(regions[0]), match_region);
}
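
/*
 * Note that bsearch() requires the regions array to be sorted by
 * ascending reg_offset; the per-model register tables are laid out
 * accordingly.
 */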

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_vmcr(vcpu, vmcr);
        else
                vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_get_vmcr(vcpu, vmcr);
        else
                vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
        unsigned long data = kvm_mmio_read_buf(val, len);

        switch (len) {
        case 1:
                return data;
        case 2:
                return le16_to_cpu(data);
        case 4:
                return le32_to_cpu(data);
        default:
                return le64_to_cpu(data);
        }
}
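
/*
 * Worked example: a guest 32-bit store of 0x01020304 lands in memory as
 * the bytes 04 03 02 01 (the GIC is little-endian). On a big-endian
 * host kvm_mmio_read_buf() yields 0x04030201 and le32_to_cpu() recovers
 * 0x01020304; on a little-endian host the value is already 0x01020304
 * and the conversion is a no-op.
 */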

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
                                unsigned long data)
{
        switch (len) {
        case 1:
                break;
        case 2:
                data = cpu_to_le16(data);
                break;
        case 4:
                data = cpu_to_le32(data);
                break;
        default:
                data = cpu_to_le64(data);
        }

        kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
        return container_of(dev, struct vgic_io_device, dev);
}

static bool check_region(const struct kvm *kvm,
                         const struct vgic_register_region *region,
                         gpa_t addr, int len)
{
        int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        switch (len) {
        case sizeof(u8):
                flags = VGIC_ACCESS_8bit;
                break;
        case sizeof(u32):
                flags = VGIC_ACCESS_32bit;
                break;
        case sizeof(u64):
                flags = VGIC_ACCESS_64bit;
                break;
        default:
                return false;
        }

        if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
                if (!region->bits_per_irq)
                        return true;

                /* Do we access a non-allocated IRQ? */
                return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
        }

        return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
                     gpa_t addr, int len)
{
        const struct vgic_register_region *region;

        region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
                                       addr - iodev->base_addr);
        if (!region || !check_region(vcpu->kvm, region, addr, len))
                return NULL;

        return region;
}
629
630 static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
631 gpa_t addr, u32 *val)
632 {
633 struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
634 const struct vgic_register_region *region;
635 struct kvm_vcpu *r_vcpu;
636
637 region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
638 if (!region) {
639 *val = 0;
640 return 0;
641 }
642
643 r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
644 if (region->uaccess_read)
645 *val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
646 else
647 *val = region->read(r_vcpu, addr, sizeof(u32));
648
649 return 0;
650 }
651
652 static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
653 gpa_t addr, const u32 *val)
654 {
655 struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
656 const struct vgic_register_region *region;
657 struct kvm_vcpu *r_vcpu;
658
659 region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
660 if (!region)
661 return 0;
662
663 r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
664 if (region->uaccess_write)
665 region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
666 else
667 region->write(r_vcpu, addr, sizeof(u32), *val);
668
669 return 0;
670 }
671
672 /*
673 * Userland access to VGIC registers.
674 */
675 int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
676 bool is_write, int offset, u32 *val)
677 {
678 if (is_write)
679 return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
680 else
681 return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
682 }

static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, int len, void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = 0;

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region) {
                memset(val, 0, len);
                return 0;
        }

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_DIST:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_REDIST:
                data = region->read(iodev->redist_vcpu, addr, len);
                break;
        case IODEV_ITS:
                data = region->its_read(vcpu->kvm, iodev->its, addr, len);
                break;
        }

        vgic_data_host_to_mmio_bus(val, len, data);
        return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                               gpa_t addr, int len, const void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = vgic_data_mmio_bus_to_host(val, len);

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region)
                return 0;

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_DIST:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_REDIST:
                region->write(iodev->redist_vcpu, addr, len, data);
                break;
        case IODEV_ITS:
                region->its_write(vcpu->kvm, iodev->its, addr, len, data);
                break;
        }

        return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
        .read = dispatch_mmio_read,
        .write = dispatch_mmio_write,
};
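
/*
 * These ops are invoked by the KVM MMIO bus when a guest access faults
 * into one of the registered VGIC regions; devices are attached to the
 * bus with kvm_io_bus_register_dev(), as done below for the
 * distributor.
 */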

int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                             enum vgic_type type)
{
        struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
        int ret = 0;
        unsigned int len;

        switch (type) {
        case VGIC_V2:
                len = vgic_v2_init_dist_iodev(io_device);
                break;
        case VGIC_V3:
                len = vgic_v3_init_dist_iodev(io_device);
                break;
        default:
                BUG_ON(1);
        }

        io_device->base_addr = dist_base_address;
        io_device->iodev_type = IODEV_DIST;
        io_device->redist_vcpu = NULL;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
                                      len, &io_device->dev);
        mutex_unlock(&kvm->slots_lock);

        return ret;
}