 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"
void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;

	if (cpuif->vgic_misr & ICH_MISR_EOI) {
		unsigned long eisr_bmap = cpuif->vgic_eisr;
		int lr;

		for_each_set_bit(lr, &eisr_bmap, kvm_vgic_global_state.nr_lr) {
			u32 intid;
			u64 val = cpuif->vgic_lr[lr];

			if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
				intid = val & ICH_LR_VIRTUAL_ID_MASK;
			else
				intid = val & GICH_LR_VIRTUALID;

			WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);

			/* Only SPIs require notification */
			if (vgic_valid_spi(vcpu->kvm, intid))
				kvm_notify_acked_irq(vcpu->kvm, 0,
						     intid - VGIC_NR_PRIVATE_IRQS);
		}

		/*
		 * In the next iterations of the vcpu loop, if we sync
		 * the vgic state after flushing it, but before entering
		 * the guest (this happens for pending signals and vmid
		 * rollovers), then make sure we don't pick up any old
		 * maintenance interrupts here.
		 */
		cpuif->vgic_eisr = 0;
	}

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
}
static bool group0_trap;
static bool group1_trap;
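/*
 * Underflow handling: when more interrupts are pending than there are
 * List Registers, ICH_HCR_EL2.UIE is set so the GIC raises a maintenance
 * interrupt once the LRs drain, giving us a chance to refill them.
 * vgic_v3_process_maintenance() above clears the bit again on sync.
 */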
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}
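/*
 * Sync direction (hardware -> software): read back what the guest did to
 * the List Registers and fold it into the per-IRQ software state.
 */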
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid;
		struct vgic_irq *irq;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		else
			intid = val & GICH_LR_VIRTUALID;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending = true;

			if (vgic_irq_is_sgi(intid) &&
			    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
				u32 cpuid = val & GICH_LR_PHYSID_CPUID;

				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
				irq->source |= (1 << cpuid);
			}
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 * Always regenerate the pending state.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL) {
			if (!(val & ICH_LR_PENDING_BIT))
				irq->soft_pending = false;

			irq->pending = irq->line_level || irq->soft_pending;
		}

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
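/*
 * Note on the ICH_LR_EL2 layout used below: bits[31:0] hold the virtual
 * INTID, bits[44:32] the physical INTID for HW interrupts, bits[55:48]
 * the priority, bit 60 the group and bits[63:62] the pending/active state.
 */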
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;

	if (irq->pending) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source)
				irq->pending = true;
		}
	}

	if (irq->active)
		val |= ICH_LR_ACTIVE_BIT;

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL)
			val |= ICH_LR_EOI;
	}

	/*
	 * We currently only support Group1 interrupts, which is a
	 * known defect. This needs to be addressed at some point.
	 */
	if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}
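/*
 * The next two helpers convert between the struct vgic_vmcr view used by
 * the rest of the vgic code and the packed ICH_VMCR_EL2 register format.
 */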
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr;

	vmcr = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
}
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;

	vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
}
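/*
 * Reset value presented to the guest in GICR_PENDBASER: inner cacheable
 * (read-allocate, write-back), outer same-as-inner, inner shareable.
 */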
#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))
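/* Initialise the per-vcpu virtual CPU interface to its reset state. */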
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;
	vgic_v3->vgic_elrsr = ~0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
}
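/*
 * Example (assuming the usual 64 KiB distributor frame and two 64 KiB
 * redistributor frames per vCPU): with 4 vCPUs, a distributor at
 * 0x08000000 and redistributors at 0x08010000, the regions
 * [0x08000000, 0x08010000) and [0x08010000, 0x08090000) do not overlap,
 * so the check below succeeds.
 */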
/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE;

	redist_size *= atomic_read(&kvm->online_vcpus);

	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;
	if (d->vgic_redist_base + redist_size < d->vgic_redist_base)
		return false;

	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= d->vgic_redist_base)
		return true;
	if (d->vgic_redist_base + redist_size <= d->vgic_dist_base)
		return true;

	return false;
}
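/*
 * Called when the guest is first run: register the distributor,
 * redistributor and (if present) ITS MMIO regions so that guest accesses
 * to them are handled in the kernel.
 */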
int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		return -EINVAL;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		return -EBUSY;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		return ret;
	}

	ret = vgic_register_redist_iodevs(kvm, dist->vgic_redist_base);
	if (ret) {
		kvm_err("Unable to register VGICv3 redist MMIO regions\n");
		return ret;
	}

	if (vgic_has_its(kvm)) {
		ret = vgic_register_its_iodevs(kvm);
		if (ret) {
			kvm_err("Unable to register VGIC ITS MMIO regions\n");
			return ret;
		}
	}

	dist->ready = true;

	return 0;
}
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);
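/*
 * Example: booting with "kvm-arm.vgic_v3_group1_trap=1" on the kernel
 * command line forces trapping of guest Group-1 sysreg accesses, which
 * also enables the vgic_v3_cpuif_trap static key in vgic_v3_probe().
 */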
/**
 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Returns 0 if a GICv3 has been found, returns an error code otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
	int ret;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
		pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
			(unsigned long long)resource_size(&info->vcpu),
			PAGE_SIZE);
		kvm_vgic_global_state.vcpu_base = 0;
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}

	if (group0_trap || group1_trap) {
		kvm_info("GICv3 sysreg trapping enabled (reduced performance)\n");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}