/*
 * VGICv2 MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"
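/*
 * GICD_CTLR, GICD_TYPER and GICD_IIDR share a single handler; the low
 * bits of the faulting offset (addr & 0x0c) select which of the three
 * registers inside the 12-byte window is being accessed.
 */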
static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        u32 value;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0;
                break;
        case GIC_DIST_CTR:
                value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
                value = (value >> 5) - 1;
                value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
                break;
        case GIC_DIST_IIDR:
                value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
                break;
        default:
                return 0;
        }

        return value;
}
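/*
 * Only GICD_CTLR is writable through this window; GICD_TYPER and
 * GICD_IIDR are read-only. Turning the distributor on kicks all VCPUs
 * so that any already-pending interrupts get forwarded.
 */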
static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        bool was_enabled = dist->enabled;

        switch (addr & 0x0c) {
        case GIC_DIST_CTRL:
                dist->enabled = val & GICD_ENABLE;
                if (!was_enabled && dist->enabled)
                        vgic_kick_vcpus(vcpu->kvm);
                break;
        case GIC_DIST_CTR:
        case GIC_DIST_IIDR:
                /* Nothing to do */
                return;
        }
}
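/*
 * GICD_SGIR field layout (GICv2):
 *   [3:0]   SGIINTID         SGI number (0..15)
 *   [23:16] CPUTargetList    bitmap of target VCPUs
 *   [25:24] TargetListFilter 0: use list, 1: all but self, 2: self only
 */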
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
                                 gpa_t addr, unsigned int len,
                                 unsigned long val)
{
        int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
        int intid = val & 0xf;
        int targets = (val >> 16) & 0xff;
        int mode = (val >> 24) & 0x03;
        int c;
        struct kvm_vcpu *vcpu;

        switch (mode) {
        case 0x0: /* as specified by targets */
                break;
        case 0x1:
                targets = (1U << nr_vcpus) - 1;                 /* all, ... */
                targets &= ~(1U << source_vcpu->vcpu_id);       /* but self */
                break;
        case 0x2: /* this very vCPU only */
                targets = (1U << source_vcpu->vcpu_id);
                break;
        case 0x3: /* reserved */
                return;
        }

        kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
                struct vgic_irq *irq;

                if (!(targets & (1U << c)))
                        continue;

                irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

                spin_lock(&irq->irq_lock);
                irq->pending_latch = true;
                irq->source |= 1U << source_vcpu->vcpu_id;

                vgic_queue_irq_unlock(source_vcpu->kvm, irq);
                vgic_put_irq(source_vcpu->kvm, irq);
        }
}
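/*
 * GICD_ITARGETSR holds one target byte per interrupt, so a read of
 * 'len' bytes gathers the targets field of 'len' consecutive IRQs.
 */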
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->targets << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}
static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
        int i;

        /* GICD_ITARGETSR[0-7] are read-only */
        if (intid < VGIC_NR_PRIVATE_IRQS)
                return;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
                int target;

                spin_lock(&irq->irq_lock);

                irq->targets = (val >> (i * 8)) & cpu_mask;
                target = irq->targets ? __ffs(irq->targets) : 0;
                irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

                spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }
}
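/*
 * GICD_SPENDSGIR and GICD_CPENDSGIR expose one byte per SGI; each bit
 * in a byte records which source CPU has that SGI pending on this VCPU.
 */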
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
                                            gpa_t addr, unsigned int len)
{
        u32 intid = addr & 0x0f;
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->source << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}
static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock(&irq->irq_lock);

                irq->source &= ~((val >> (i * 8)) & 0xff);
                if (!irq->source)
                        irq->pending_latch = false;

                spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }
}
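/*
 * The set side is asymmetric on purpose: vgic_queue_irq_unlock() both
 * queues the now-pending SGI and drops irq_lock, so a plain unlock is
 * only needed when nothing became pending.
 */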
static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len,
                                     unsigned long val)
{
        u32 intid = addr & 0x0f;
        int i;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                spin_lock(&irq->irq_lock);

                irq->source |= (val >> (i * 8)) & 0xff;

                if (irq->source) {
                        irq->pending_latch = true;
                        vgic_queue_irq_unlock(vcpu->kvm, irq);
                } else {
                        spin_unlock(&irq->irq_lock);
                }
                vgic_put_irq(vcpu->kvm, irq);
        }
}
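/* Architecture version reported in GICC_IIDR[19:16] */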
#define GICC_ARCH_VERSION_V2	0x2

/* These are for userland accesses only; there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
                                           gpa_t addr, unsigned int len)
{
        struct vgic_vmcr vmcr;
        u32 val;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                val = vmcr.ctlr;
                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so we expose the upper five bits of
                 * the priority mask to userspace using the lower bits in
                 * the word.
                 */
                val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
                        GICV_PMR_PRIORITY_SHIFT;
                break;
        case GIC_CPU_BINPOINT:
                val = vmcr.bpr;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                val = vmcr.abpr;
                break;
        case GIC_CPU_IDENT:
                val = ((PRODUCT_ID_KVM << 20) |
                       (GICC_ARCH_VERSION_V2 << 16) |
                       IMPLEMENTER_ARM);
                break;
        default:
                return 0;
        }

        return val;
}
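/*
 * The write side mirrors the read side: the affected field is updated
 * in a local copy of the VMCR, which is then written back in one go.
 */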
static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len,
                                   unsigned long val)
{
        struct vgic_vmcr vmcr;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
                vmcr.ctlr = val;
                break;
        case GIC_CPU_PRIMASK:
                /*
                 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
                 * PMR field as GICH_VMCR.VMPriMask rather than
                 * GICC_PMR.Priority, so we expose the upper five bits of
                 * the priority mask to userspace using the lower bits in
                 * the word.
                 */
                vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
                        GICV_PMR_PRIORITY_MASK;
                break;
        case GIC_CPU_BINPOINT:
                vmcr.bpr = val;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                vmcr.abpr = val;
                break;
        }

        vgic_set_vmcr(vcpu, &vmcr);
}
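/*
 * Register maps for the emulated frames. REGISTER_DESC_WITH_LENGTH
 * describes a fixed-size window, while REGISTER_DESC_WITH_BITS_PER_IRQ
 * describes a range whose size scales with the number of implemented
 * interrupts; the numeric argument before the access flags gives the
 * number of bits per IRQ.
 */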
static const struct vgic_register_region vgic_v2_dist_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
                vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
                vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
                vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
                vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
                vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
                vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
                vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
                vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
                vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
                vgic_mmio_read_target, vgic_mmio_write_target, 8,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
                vgic_mmio_read_config, vgic_mmio_write_config, 2,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
                vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
        REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
                vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};
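/*
 * GICC frame emulation; these entries back the
 * KVM_DEV_ARM_VGIC_GRP_CPU_REGS userspace accessors below.
 */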
static const struct vgic_register_region vgic_v2_cpu_registers[] = {
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
                vgic_mmio_read_raz, vgic_mmio_write_wi, 16,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
                vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
                VGIC_ACCESS_32bit),
};
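/*
 * Wire up the distributor register table on the I/O device and report
 * the size of the distributor's MMIO window (4K).
 */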
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
        dev->regions = vgic_v2_dist_registers;
        dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

        kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

        return SZ_4K;
}
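/*
 * Check whether a device attribute (group + offset) maps onto an
 * emulated register before userspace attempts a get/set on it.
 */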
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        const struct vgic_register_region *region;
        struct vgic_io_device iodev;
        struct vgic_reg_attr reg_attr;
        struct kvm_vcpu *vcpu;
        gpa_t addr;
        int ret;

        ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
        if (ret)
                return ret;

        vcpu = reg_attr.vcpu;
        addr = reg_attr.addr;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                iodev.regions = vgic_v2_dist_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
                iodev.base_addr = 0;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                iodev.regions = vgic_v2_cpu_registers;
                iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
                iodev.base_addr = 0;
                break;
        default:
                return -ENXIO;
        }

        /* We only support aligned 32-bit accesses. */
        if (addr & 3)
                return -ENXIO;

        region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
        if (!region)
                return -ENXIO;

        return 0;
}
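/*
 * Userspace access helpers: build a transient vgic_io_device around the
 * relevant register table and dispatch through the common vgic_uaccess().
 */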
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                          int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_cpu_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
                .iodev_type = IODEV_CPUIF,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                         int offset, u32 *val)
{
        struct vgic_io_device dev = {
                .regions = vgic_v2_dist_registers,
                .nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
                .iodev_type = IODEV_DIST,
        };

        return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}