/*
 * VGICv3 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"
/* extract @num bytes at @offset bytes offset in data */
unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num)
{
	return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}
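/*
 * Worked example (illustrative, not from the original source): for
 * data = 0x1122334455667788, offset = 4 and num = 2, the shift yields
 * 0x11223344 and GENMASK_ULL(15, 0) keeps the low 16 bits, so the
 * result is 0x3344.
 */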
/* allows updates of any half of a 64-bit register (or the whole thing) */
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val)
{
	int lower = (offset & 4) * 8;
	int upper = lower + 8 * len - 1;

	reg &= ~GENMASK_ULL(upper, lower);
	val &= GENMASK_ULL(len * 8 - 1, 0);

	return reg | ((u64)val << lower);
}
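/*
 * Worked example (illustrative): a 4-byte write of val = 0xdeadbeef at
 * offset 4 into reg = 0xaaaaaaaa55555555 gives lower = 32 and
 * upper = 63; the old upper half is cleared and replaced, returning
 * 0xdeadbeef55555555 while the lower half stays untouched.
 */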
bool vgic_has_its(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
		return false;

	return dist->has_its;
}
static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 value = 0;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		if (vcpu->kvm->arch.vgic.enabled)
			value |= GICD_CTLR_ENABLE_SS_G1;
		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
		break;
	case GICD_TYPER:
		value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		if (vgic_has_its(vcpu->kvm)) {
			value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
			value |= GICD_TYPER_LPIS;
		} else {
			value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
		}
		break;
	case GICD_IIDR:
		value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		break;
	default:
		return 0;
	}

	return value;
}
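/*
 * Worked example for the GICD_TYPER computation above (illustrative):
 * with nr_spis = 224 the intermediate value is 224 + 32 = 256 IDs;
 * (256 >> 5) - 1 = 7 becomes the ITLinesNumber field, from which the
 * guest derives 32 * (7 + 1) = 256 supported interrupt IDs.
 */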
static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;

		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GICD_TYPER:
	case GICD_IIDR:
		return;
	}
}
static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
	unsigned long ret = 0;

	if (!irq)
		return 0;

	/* The upper word is RAZ for us. */
	if (!(addr & 4))
		ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

	vgic_put_irq(vcpu->kvm, irq);
	return ret;
}
static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq;

	/* The upper word is WI for us since we don't implement Aff3. */
	if (addr & 4)
		return;

	irq = vgic_get_irq(vcpu->kvm, NULL, intid);

	if (!irq)
		return;

	spin_lock(&irq->irq_lock);

	/* We only care about and preserve Aff0, Aff1 and Aff2. */
	irq->mpidr = val & GENMASK(23, 0);
	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

	spin_unlock(&irq->irq_lock);
	vgic_put_irq(vcpu->kvm, irq);
}
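/*
 * Note on the layout assumed above: in the 64-bit GICD_IROUTERn
 * register Aff0, Aff1 and Aff2 occupy bits [7:0], [15:8] and [23:16]
 * (hence the GENMASK(23, 0)), while Aff3 sits in bits [39:32] of the
 * upper word, which this implementation treats as write-ignored.
 */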
static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
}
static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	bool was_enabled = vgic_cpu->lpis_enabled;

	if (!vgic_has_its(vcpu->kvm))
		return;

	vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;

	if (!was_enabled && vgic_cpu->lpis_enabled)
		vgic_enable_lpis(vcpu);
}
static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
	int target_vcpu_id = vcpu->vcpu_id;
	u64 value;

	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
	value |= ((target_vcpu_id & 0xffff) << 8);
	if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
		value |= GICR_TYPER_LAST;
	if (vgic_has_its(vcpu->kvm))
		value |= GICR_TYPER_PLPIS;

	return extract_bytes(value, addr & 7, len);
}
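/*
 * Illustrative GICR_TYPER value built above: for the VCPU with
 * vcpu_id = 1 and MPIDR Aff0 = 1 in a two-VCPU guest, value is
 * (1ULL << 32) for the affinity bits plus (1 << 8) for
 * Processor_Number, plus GICR_TYPER_LAST since it is the final
 * redistributor (and GICR_TYPER_PLPIS if an ITS is present).
 */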
static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}
static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GICD_PIDR2:
		/* report a GICv3 compliant implementation */
		return 0x3b;
	}

	return 0;
}
static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
						  gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/*
	 * pending state of interrupt is latched in pending_latch variable.
	 * Userspace will save and restore pending state and line_level
	 * separately.
	 * Refer to Documentation/virtual/kvm/devices/arm-vgic-v3.txt
	 * for handling of ISPENDR and ICPENDR.
	 */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->pending_latch)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
					  gpa_t addr, unsigned int len,
					  unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		if (test_bit(i, &val)) {
			/*
			 * pending_latch is set irrespective of irq type
			 * (level or edge) to avoid dependency that VM should
			 * restore irq config before pending info.
			 */
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq);
		} else {
			irq->pending_latch = false;
			spin_unlock(&irq->irq_lock);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}
/* We want to avoid outer shareable. */
u64 vgic_sanitise_shareability(u64 field)
{
	switch (field) {
	case GIC_BASER_OuterShareable:
		return GIC_BASER_InnerShareable;
	default:
		return field;
	}
}
/* Avoid any inner non-cacheable mapping. */
u64 vgic_sanitise_inner_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_nCnB:
	case GIC_BASER_CACHE_nC:
		return GIC_BASER_CACHE_RaWb;
	default:
		return field;
	}
}
/* Non-cacheable or same-as-inner are OK. */
u64 vgic_sanitise_outer_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_SameAsInner:
	case GIC_BASER_CACHE_nC:
		return field;
	default:
		return GIC_BASER_CACHE_nC;
	}
}
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64))
{
	u64 field = (reg & field_mask) >> field_shift;

	field = sanitise_fn(field) << field_shift;
	return (reg & ~field_mask) | field;
}
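/*
 * Usage example (illustrative): sanitising the PROPBASER shareability
 * field with
 *	vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
 *			    GICR_PROPBASER_SHAREABILITY_SHIFT,
 *			    vgic_sanitise_shareability)
 * extracts the field, maps OuterShareable to InnerShareable and writes
 * the result back without disturbing any other bits.
 */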
#define PROPBASER_RES0_MASK						\
	(GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
#define PENDBASER_RES0_MASK						\
	(BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) |	\
	 GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))
static u64 vgic_sanitise_pendbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
				  GICR_PENDBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
				  GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PENDBASER_RES0_MASK;
	reg &= ~GENMASK_ULL(51, 48);

	return reg;
}
static u64 vgic_sanitise_propbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
				  GICR_PROPBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
				  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PROPBASER_RES0_MASK;
	reg &= ~GENMASK_ULL(51, 48);

	return reg;
}
static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return extract_bytes(dist->propbaser, addr & 7, len);
}
static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_propbaser, propbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

	do {
		old_propbaser = READ_ONCE(dist->propbaser);
		propbaser = old_propbaser;
		propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
		propbaser = vgic_sanitise_propbaser(propbaser);
	} while (cmpxchg64(&dist->propbaser, old_propbaser,
			   propbaser) != old_propbaser);
}
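/*
 * The loop above is a lock-free update: the current register value is
 * re-read, modified and sanitised on every iteration, and cmpxchg64()
 * only commits the result if no concurrent writer changed the register
 * in the meantime; otherwise the loop retries. The PENDBASER writer
 * below follows the same pattern.
 */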
static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	return extract_bytes(vgic_cpu->pendbaser, addr & 7, len);
}
static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_pendbaser, pendbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

	do {
		old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
		pendbaser = old_pendbaser;
		pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
		pendbaser = vgic_sanitise_pendbaser(pendbaser);
	} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
			   pendbaser) != old_pendbaser);
}
/*
 * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
 * redistributors, while SPIs are covered by registers in the distributor
 * block. Trying to set private IRQs in this block gets ignored.
 * We take some special care here to fix the calculation of the register
 * offset.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
	{								\
		.reg_offset = off,					\
		.bits_per_irq = bpi,					\
		.len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,		\
		.access_flags = acc,					\
		.read = vgic_mmio_read_raz,				\
		.write = vgic_mmio_write_wi,				\
	}, {								\
		.reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,	\
		.bits_per_irq = bpi,					\
		.len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,	\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = ur,					\
		.uaccess_write = uw,					\
	}
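/*
 * Worked example (illustrative): for GICD_ISENABLER with bpi = 1 the
 * first half of the pair covers off .. off + 3 (one bit for each of
 * the 32 private IRQs, RAZ/WI here) and the second half starts at
 * off + 4 with a length of (1 * (1024 - 32)) / 8 = 124 bytes for the
 * SPIs.
 */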
static const struct vgic_register_region vgic_v3_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
		vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
		vgic_mmio_read_rao, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		NULL, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		NULL, vgic_mmio_uaccess_write_cactive,
		1, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
		vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};
static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
		vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
		vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
		vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};
static const struct vgic_register_region vgic_v3_sgibase_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GICR_IGROUPR0,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ISENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ICENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		NULL, vgic_mmio_uaccess_write_sactive,
		4, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		NULL, vgic_mmio_uaccess_write_cactive,
		4, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GICR_ICFGR0,
		vgic_mmio_read_config, vgic_mmio_write_config, 8,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IGRPMODR0,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_NSACR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
};
unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v3_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_64K;
}
/**
 * vgic_register_redist_iodev - register a single redist iodev
 * @vcpu: The VCPU to which the redistributor belongs
 *
 * Register a KVM iodev for this VCPU's redistributor using the address
 * provided.
 *
 * Return 0 on success, -ERRNO otherwise.
 */
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
	struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;
	gpa_t rd_base, sgi_base;
	int ret;

	/*
	 * We may be creating VCPUs before having set the base address for the
	 * redistributor region, in which case we will come back to this
	 * function for all VCPUs when the base address is set.  Just return
	 * without doing any work for now.
	 */
	if (IS_VGIC_ADDR_UNDEF(vgic->vgic_redist_base))
		return 0;

	if (!vgic_v3_check_base(kvm))
		return -EINVAL;

	rd_base = vgic->vgic_redist_base + vgic->vgic_redist_free_offset;
	sgi_base = rd_base + SZ_64K;

	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
	rd_dev->base_addr = rd_base;
	rd_dev->iodev_type = IODEV_REDIST;
	rd_dev->regions = vgic_v3_rdbase_registers;
	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
	rd_dev->redist_vcpu = vcpu;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
				      SZ_64K, &rd_dev->dev);
	mutex_unlock(&kvm->slots_lock);

	if (ret)
		return ret;

	kvm_iodevice_init(&sgi_dev->dev, &kvm_io_gic_ops);
	sgi_dev->base_addr = sgi_base;
	sgi_dev->iodev_type = IODEV_REDIST;
	sgi_dev->regions = vgic_v3_sgibase_registers;
	sgi_dev->nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers);
	sgi_dev->redist_vcpu = vcpu;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
				      SZ_64K, &sgi_dev->dev);
	if (ret) {
		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
					  &rd_dev->dev);
		goto out;
	}

	vgic->vgic_redist_free_offset += 2 * SZ_64K;
out:
	mutex_unlock(&kvm->slots_lock);
	return ret;
}
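/*
 * Layout note (illustrative): each VCPU consumes two contiguous 64K
 * frames, so VCPU n's RD_base ends up at
 * vgic_redist_base + n * 2 * SZ_64K and its SGI_base one 64K frame
 * above that, which is why vgic_redist_free_offset advances by
 * 2 * SZ_64K per successful registration.
 */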
static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
	struct vgic_io_device *sgi_dev = &vcpu->arch.vgic_cpu.sgi_iodev;

	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &sgi_dev->dev);
}
static int vgic_register_all_redist_iodevs(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c, ret = 0;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		ret = vgic_register_redist_iodev(vcpu);
		if (ret)
			break;
	}

	if (ret) {
		/* The current c failed, so we start with the previous one. */
		mutex_lock(&kvm->slots_lock);
		for (c--; c >= 0; c--) {
			vcpu = kvm_get_vcpu(kvm, c);
			vgic_unregister_redist_iodev(vcpu);
		}
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}
int vgic_v3_set_redist_base(struct kvm *kvm, u64 addr)
{
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int ret;

	/* vgic_check_ioaddr makes sure we don't do this twice */
	ret = vgic_check_ioaddr(kvm, &vgic->vgic_redist_base, addr, SZ_64K);
	if (ret)
		return ret;

	vgic->vgic_redist_base = addr;
	if (!vgic_v3_check_base(kvm)) {
		vgic->vgic_redist_base = VGIC_ADDR_UNDEF;
		return -EINVAL;
	}

	/*
	 * Register iodevs for each existing VCPU.  Adding more VCPUs
	 * afterwards will register the iodevs when needed.
	 */
	ret = vgic_register_all_redist_iodevs(kvm);
	if (ret)
		return ret;

	return 0;
}
int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v3_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		iodev.regions = vgic_v3_rdbase_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
		iodev.base_addr = 0;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
		u64 reg, id;

		id = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
		return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
	}
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}
/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
	unsigned long affinity;
	int level0;

	/*
	 * Split the current VCPU's MPIDR into affinity level 0 and the
	 * rest as this is what we have to compare against.
	 */
	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
	affinity &= ~MPIDR_LEVEL_MASK;

	/* bail out if the upper three levels don't match */
	if (sgi_aff != affinity)
		return -1;

	/* Is this VCPU's bit set in the mask ? */
	if (!(sgi_cpu_mask & BIT(level0)))
		return -1;

	return level0;
}
/*
 * The ICC_SGI* registers encode the affinity differently from the MPIDR,
 * so provide a wrapper to use the existing defines to isolate a certain
 * affinity level.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
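/*
 * Worked example (illustrative): for level 1,
 * ICC_SGI1R_AFFINITY_1_SHIFT is 16 and MPIDR_LEVEL_SHIFT(1) is 8, so
 * an Aff1 value of 0x5 in the SGI register (bits [23:16]) is repacked
 * as 0x500 in MPIDR layout.
 */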
/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all, but not the
 * calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	u16 target_cpus;
	u64 mpidr;
	int sgi, c;
	int vcpu_id = vcpu->vcpu_id;
	bool broadcast;

	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
	broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

	/*
	 * We iterate over all VCPUs to find the MPIDRs matching the request.
	 * If we have handled one CPU, we clear its bit to detect early
	 * if we are already finished. This avoids iterating through all
	 * VCPUs when most of the times we just signal a single VCPU.
	 */
	kvm_for_each_vcpu(c, c_vcpu, kvm) {
		struct vgic_irq *irq;

		/* Exit early if we have dealt with all requested CPUs */
		if (!broadcast && target_cpus == 0)
			break;

		/* Don't signal the calling VCPU */
		if (broadcast && c == vcpu_id)
			continue;

		if (!broadcast) {
			int level0;

			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
			if (level0 == -1)
				continue;

			/* remove this matching VCPU from the mask */
			target_cpus &= ~BIT(level0);
		}

		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);

		spin_lock(&irq->irq_lock);
		irq->pending_latch = true;

		vgic_queue_irq_unlock(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
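/*
 * Decoding example (illustrative): a write of
 * (3 << ICC_SGI1R_SGI_ID_SHIFT) | 0x1 to ICC_SGI1R_EL1, with all
 * affinity fields zero and IRM clear, requests SGI 3 for the single
 * VCPU whose MPIDR has Aff3.Aff2.Aff1 = 0.0.0 and whose Aff0 matches
 * bit 0 of the target list.
 */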
int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v3_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}
int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			   int offset, u32 *val)
{
	struct vgic_io_device rd_dev = {
		.regions = vgic_v3_rdbase_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers),
	};

	struct vgic_io_device sgi_dev = {
		.regions = vgic_v3_sgibase_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers),
	};

	/* SGI_base is the next 64K frame after RD_base */
	if (offset >= SZ_64K)
		return vgic_uaccess(vcpu, &sgi_dev, is_write, offset - SZ_64K,
				    val);
	else
		return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
}
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
				    u32 intid, u64 *val)
{
	if (intid % 32)
		return -EINVAL;

	if (is_write)
		vgic_write_irq_line_level_info(vcpu, intid, *val);
	else
		*val = vgic_read_irq_line_level_info(vcpu, intid);

	return 0;
}