/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

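/*
 * Handle a maintenance interrupt: notify any registered ack notifiers
 * about SPIs the guest has just EOIed, clear the stale EISR state so old
 * maintenance interrupts are not picked up again on the next entry, and
 * drop any pending underflow request.
 */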
void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;

        if (cpuif->vgic_misr & ICH_MISR_EOI) {
                unsigned long eisr_bmap = cpuif->vgic_eisr;
                int lr;

                for_each_set_bit(lr, &eisr_bmap, kvm_vgic_global_state.nr_lr) {
                        u32 intid;
                        u64 val = cpuif->vgic_lr[lr];

                        if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
                                intid = val & ICH_LR_VIRTUAL_ID_MASK;
                        else
                                intid = val & GICH_LR_VIRTUALID;

                        WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);

                        /* Only SPIs require notification */
                        if (vgic_valid_spi(vcpu->kvm, intid))
                                kvm_notify_acked_irq(vcpu->kvm, 0,
                                                     intid - VGIC_NR_PRIVATE_IRQS);
                }

                /*
                 * In the next iterations of the vcpu loop, if we sync
                 * the vgic state after flushing it, but before
                 * entering the guest (this happens for pending
                 * signals and vmid rollovers), then make sure we
                 * don't pick up any old maintenance interrupts here.
                 */
                cpuif->vgic_eisr = 0;
        }

        cpuif->vgic_hcr &= ~ICH_HCR_UIE;
}

static bool group1_trap;

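/*
 * Request an underflow maintenance interrupt so that the guest exits once
 * the list registers are (almost) empty, giving us a chance to refill them
 * with further pending interrupts.
 */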
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

        cpuif->vgic_hcr |= ICH_HCR_UIE;
}

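/*
 * Fold the list register state back into the software model after a guest
 * exit: the active bit is always preserved, the pending bit only for
 * edge-triggered interrupts (remembering the requesting CPU for GICv2
 * SGIs), and the soft-pending state of level interrupts is cleared once
 * the guest has acked them.
 */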
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        int lr;

        for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
                u64 val = cpuif->vgic_lr[lr];
                u32 intid;
                struct vgic_irq *irq;

                if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
                        intid = val & ICH_LR_VIRTUAL_ID_MASK;
                else
                        intid = val & GICH_LR_VIRTUALID;
                irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
                if (!irq)       /* An LPI could have been unmapped. */
                        continue;

                spin_lock(&irq->irq_lock);

                /* Always preserve the active bit */
                irq->active = !!(val & ICH_LR_ACTIVE_BIT);

                /* Edge is the only case where we preserve the pending bit */
                if (irq->config == VGIC_CONFIG_EDGE &&
                    (val & ICH_LR_PENDING_BIT)) {
                        irq->pending = true;

                        if (vgic_irq_is_sgi(intid) &&
                            model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                                u32 cpuid = val & GICH_LR_PHYSID_CPUID;

                                cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
                                irq->source |= (1 << cpuid);
                        }
                }

                /*
                 * Clear soft pending state when level irqs have been acked.
                 * Always regenerate the pending state.
                 */
                if (irq->config == VGIC_CONFIG_LEVEL) {
                        if (!(val & ICH_LR_PENDING_BIT))
                                irq->soft_pending = false;

                        irq->pending = irq->line_level || irq->soft_pending;
                }

                spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

/*
 * Translate the software state of @irq into a list register value.
 * Requires the irq to be locked already.
 */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u64 val = irq->intid;

        if (irq->pending) {
                val |= ICH_LR_PENDING_BIT;

                if (irq->config == VGIC_CONFIG_EDGE)
                        irq->pending = false;

                if (vgic_irq_is_sgi(irq->intid) &&
                    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                        u32 src = ffs(irq->source);

                        BUG_ON(!src);
                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));
                        if (irq->source)
                                irq->pending = true;
                }
        }

        if (irq->active)
                val |= ICH_LR_ACTIVE_BIT;

        if (irq->hw) {
                val |= ICH_LR_HW;
                val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
        } else {
                if (irq->config == VGIC_CONFIG_LEVEL)
                        val |= ICH_LR_EOI;
        }

        /*
         * We currently only support Group1 interrupts, which is a
         * known defect. This needs to be addressed at some point.
         */
        if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
                val |= ICH_LR_GROUP;

        val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

        vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

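/*
 * Pack the emulated CPU interface state (CTLR, binary points and priority
 * mask) into the ICH_VMCR layout; vgic_v3_get_vmcr() performs the reverse.
 */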
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        u32 vmcr;

        vmcr = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK;
        vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
        vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
        vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;

        vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
}

void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;

        vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT;
        vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
        vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
        vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
}

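/*
 * Reset value for GICR_PENDBASER: inner read-allocate write-back cacheable,
 * outer cacheability same as inner, inner shareable.
 */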
#define INITIAL_PENDBASER_VALUE \
        (GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) | \
         GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner) | \
         GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

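/*
 * (Re)initialise the per-vcpu GICv3 CPU interface: reset VMCR, mark all
 * list registers as empty, select GICv3 (SRE) or GICv2-compatible mode
 * depending on the emulated GIC model, and enable the interface.
 */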
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

        /*
         * By forcing VMCR to zero, the GIC will restore the binary
         * points to their reset values. Anything else resets to zero
         * anyway.
         */
        vgic_v3->vgic_vmcr = 0;
        vgic_v3->vgic_elrsr = ~0;

        /*
         * If we are emulating a GICv3, we do it in a non-GICv2-compatible
         * way, so we force SRE to 1 to demonstrate this to the guest.
         * This goes with the spec allowing the value to be RAO/WI.
         */
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
                vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
        } else {
                vgic_v3->vgic_sre = 0;
        }

        /* Get the show on the road... */
        vgic_v3->vgic_hcr = ICH_HCR_EN;
        if (group1_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v3_check_base(struct kvm *kvm)
{
        struct vgic_dist *d = &kvm->arch.vgic;
        gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE;

        redist_size *= atomic_read(&kvm->online_vcpus);

        if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
                return false;
        if (d->vgic_redist_base + redist_size < d->vgic_redist_base)
                return false;

        if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= d->vgic_redist_base)
                return true;
        if (d->vgic_redist_base + redist_size <= d->vgic_dist_base)
                return true;

        return false;
}

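/*
 * Register the MMIO devices of the emulated GICv3 (distributor,
 * redistributors and, if present, the ITS) at the base addresses userspace
 * has configured, then mark the vgic as ready.
 */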
int vgic_v3_map_resources(struct kvm *kvm)
{
        int ret = 0;
        struct vgic_dist *dist = &kvm->arch.vgic;

        if (vgic_ready(kvm))
                goto out;

        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
                kvm_err("Need to set vgic distributor addresses first\n");
                ret = -ENXIO;
                goto out;
        }

        if (!vgic_v3_check_base(kvm)) {
                kvm_err("VGIC redist and dist frames overlap\n");
                ret = -EINVAL;
                goto out;
        }

        /*
         * For a VGICv3 we require the userland to explicitly initialize
         * the VGIC before we need to use it.
         */
        if (!vgic_initialized(kvm)) {
                ret = -EBUSY;
                goto out;
        }

        ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
        if (ret) {
                kvm_err("Unable to register VGICv3 dist MMIO regions\n");
                goto out;
        }

        ret = vgic_register_redist_iodevs(kvm, dist->vgic_redist_base);
        if (ret) {
                kvm_err("Unable to register VGICv3 redist MMIO regions\n");
                goto out;
        }

        if (vgic_has_its(kvm)) {
                ret = vgic_register_its_iodevs(kvm);
                if (ret) {
                        kvm_err("Unable to register VGIC ITS MMIO regions\n");
                        goto out;
                }
        }

        dist->ready = true;

out:
        return ret;
}

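/*
 * Static key: when enabled, guest accesses to (some of) the GICv3 CPU
 * interface system registers are trapped and emulated.
 */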
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

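/* Parse the "kvm-arm.vgic_v3_group1_trap" early command line parameter. */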
static int __init early_group1_trap_cfg(char *buf)
{
        return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

/**
 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Returns 0 if a GICv3 has been found, returns an error code otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
        u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
        int ret;

        /*
         * The ListRegs field is 5 bits, but there is an architectural
         * maximum of 16 list registers. Just ignore bit 4...
         */
        kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
        kvm_vgic_global_state.can_emulate_gicv2 = false;

        if (!info->vcpu.start) {
                kvm_info("GICv3: no GICV resource entry\n");
                kvm_vgic_global_state.vcpu_base = 0;
        } else if (!PAGE_ALIGNED(info->vcpu.start)) {
                pr_warn("GICV physical address 0x%llx not page aligned\n",
                        (unsigned long long)info->vcpu.start);
                kvm_vgic_global_state.vcpu_base = 0;
        } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
                pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
                        (unsigned long long)resource_size(&info->vcpu),
                        PAGE_SIZE);
                kvm_vgic_global_state.vcpu_base = 0;
        } else {
                kvm_vgic_global_state.vcpu_base = info->vcpu.start;
                kvm_vgic_global_state.can_emulate_gicv2 = true;
                ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
                if (ret) {
                        kvm_err("Cannot register GICv2 KVM device.\n");
                        return ret;
                }
                kvm_info("vgic-v2@%llx\n", info->vcpu.start);
        }
        ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
        if (ret) {
                kvm_err("Cannot register GICv3 KVM device.\n");
                kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
                return ret;
        }

        if (kvm_vgic_global_state.vcpu_base == 0)
                kvm_info("disabling GICv2 emulation\n");

        if (group1_trap) {
                kvm_info("GICv3 sysreg trapping enabled (reduced performance)\n");
                static_branch_enable(&vgic_v3_cpuif_trap);
        }

        kvm_vgic_global_state.vctrl_base = NULL;
        kvm_vgic_global_state.type = VGIC_V3;
        kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

        return 0;
}