/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group1_trap;

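/*
 * Request an underflow maintenance interrupt (ICH_HCR_EL2.UIE) so that we
 * get a chance to refill the list registers once most of them have been
 * consumed by the guest.
 */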
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}

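/*
 * An LR signals an EOI maintenance interrupt when it is invalid (neither
 * pending nor active), has the EOI bit set, and is not a hardware-mapped
 * interrupt.
 */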
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}

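/*
 * Fold the state that the hardware left in the list registers back into the
 * software model (struct vgic_irq) after a guest exit.
 */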
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid;
		struct vgic_irq *irq;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		else
			intid = val & GICH_LR_VIRTUALID;

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (vgic_irq_is_sgi(intid) &&
			    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
				u32 cpuid = val & GICH_LR_PHYSID_CPUID;

				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
				irq->source |= (1 << cpuid);
			}
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 * Always regenerate the pending state.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL) {
			if (!(val & ICH_LR_PENDING_BIT))
				irq->pending_latch = false;
		}

		spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	vgic_cpu->used_lrs = 0;
}

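/*
 * Transfer the state of a struct vgic_irq into a list register so that the
 * hardware can present the interrupt to the guest on the next entry.
 */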
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;

	if (irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source)
				irq->pending_latch = true;
		}
	}

	if (irq->active)
		val |= ICH_LR_ACTIVE_BIT;

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active && irq_is_pending(irq))
			val &= ~ICH_LR_PENDING_BIT;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL)
			val |= ICH_LR_EOI;
	}

	/*
	 * We currently only support Group1 interrupts, which is a
	 * known defect. This needs to be addressed at some point.
	 */
	if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

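/*
 * Pack the architecture-independent struct vgic_vmcr into the layout of the
 * hardware ICH_VMCR_EL2 register.
 */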
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

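/*
 * Unpack the hardware ICH_VMCR_EL2 layout back into the
 * architecture-independent struct vgic_vmcr.
 */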
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
				ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
				ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the VFIQEn
		 * bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

#define INITIAL_PENDBASER_VALUE						\
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

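/*
 * Initialise the per-vcpu GICv3 CPU interface state to architecturally sane
 * defaults before the vcpu runs for the first time.
 */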
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;
	vgic_v3->vgic_elrsr = ~0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
}

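/*
 * Read the pending state of an LPI from the guest's pending table and
 * propagate it into the pending_latch of the corresponding struct vgic_irq,
 * clearing the bit in guest memory once it has been consumed.
 */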
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	spin_lock(&irq->irq_lock);
	if (irq->target_vcpu != vcpu) {
		spin_unlock(&irq->irq_lock);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 *
 * The kvm lock and all vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int last_byte_offset = -1;
	struct vgic_irq *irq;
	int ret;

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool stored;
		u8 val;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest(kvm, ptr, &val, 1);
			if (ret)
				return ret;
			last_byte_offset = byte_offset;
		}

		stored = val & (1U << bit_nr);
		if (stored == irq->pending_latch)
			continue;

		if (irq->pending_latch)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE;

	redist_size *= atomic_read(&kvm->online_vcpus);

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_redist_base) &&
	    d->vgic_redist_base + redist_size < d->vgic_redist_base)
		return false;

	/* Both base addresses must be set to check if they overlap */
	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(d->vgic_redist_base))
		return true;

	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= d->vgic_redist_base)
		return true;
	if (d->vgic_redist_base + redist_size <= d->vgic_dist_base)
		return true;

	return false;
}

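/*
 * Register the distributor MMIO region once userspace has provided the base
 * addresses and initialised the VGIC, and mark the distributor as ready.
 */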
int vgic_v3_map_resources(struct kvm *kvm)
{
	int ret = 0;
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		goto out;
	}

	dist->ready = true;

out:
	return ret;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

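/*
 * Allow group1 system register trapping to be requested on the kernel
 * command line via kvm-arm.vgic_v3_group1_trap.
 */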
static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

/**
 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Returns 0 if a GICv3 has been found, returns an error code otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
	int ret;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
		pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
			(unsigned long long)resource_size(&info->vcpu),
			PAGE_SIZE);
		kvm_vgic_global_state.vcpu_base = 0;
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

	if (group1_trap) {
		kvm_info("GICv3 sysreg trapping enabled (reduced performance)\n");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}

void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
}

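/*
 * Counterpart to vgic_v3_load(): save the guest's view of ICH_VMCR_EL2 back
 * into memory when the vcpu is put.
 */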
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
}
328e5664 | 526 | } |