/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;

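/*
 * Ask the GIC to raise a maintenance interrupt once no List Register
 * holds a pending interrupt, so that further pending interrupts can be
 * queued.
 */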
void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_NPIE;
}

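/*
 * Ask the GIC to raise a maintenance interrupt on List Register
 * underflow, giving us a chance to refill the LRs with interrupts that
 * did not fit earlier.
 */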
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}

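/*
 * An LR signals an EOI maintenance interrupt when it is invalid (all
 * state bits clear), has EOI notification enabled, and is not mapped to
 * a hardware interrupt.
 */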
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}

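/*
 * Fold the LR contents back into the software state of each interrupt
 * after running the guest, then mark all LRs as free again.
 */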
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;
	unsigned long flags;

	cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);

	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid;
		struct vgic_irq *irq;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		else
			intid = val & GICH_LR_VIRTUALID;

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		spin_lock_irqsave(&irq->irq_lock, flags);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (vgic_irq_is_sgi(intid) &&
			    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
				u32 cpuid = val & GICH_LR_PHYSID_CPUID;

				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
				irq->source |= (1 << cpuid);
			}
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 * Always regenerate the pending state.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL) {
			if (!(val & ICH_LR_PENDING_BIT))
				irq->pending_latch = false;
		}

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	vgic_cpu->used_lrs = 0;
}

/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;

	if (irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source)
				irq->pending_latch = true;
		}
	}

	if (irq->active)
		val |= ICH_LR_ACTIVE_BIT;

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active && irq_is_pending(irq))
			val &= ~ICH_LR_PENDING_BIT;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL)
			val |= ICH_LR_EOI;
	}

	/*
	 * We currently only support Group1 interrupts, which is a
	 * known defect. This needs to be addressed at some point.
	 */
	if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

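/* Pack the generic vmcr fields into the shadow ICH_VMCR_EL2 value. */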
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the
		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

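/* Unpack the shadow ICH_VMCR_EL2 value back into the generic vmcr fields. */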
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
			ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1, the
		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

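/*
 * Reset value for GICR_PENDBASER: inner cacheable (read-allocate,
 * write-back), outer cacheability same-as-inner, inner-shareable.
 */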
#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;
	vgic_v3->vgic_elrsr = ~0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
}

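/*
 * Read the pending bit for an LPI from the guest's pending table,
 * transfer it into the pending_latch, and clear the consumed bit in
 * guest memory again.
 */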
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 *
 * The kvm lock and all vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int last_byte_offset = -1;
	struct vgic_irq *irq;
	int ret;
	u8 val;

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				return ret;
			last_byte_offset = byte_offset;
		}

		stored = val & (1U << bit_nr);
		if (stored == irq->pending_latch)
			continue;

		if (irq->pending_latch)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE;

	redist_size *= atomic_read(&kvm->online_vcpus);

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_redist_base) &&
	    d->vgic_redist_base + redist_size < d->vgic_redist_base)
		return false;

	/* Both base addresses must be set to check if they overlap */
	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(d->vgic_redist_base))
		return true;

	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= d->vgic_redist_base)
		return true;
	if (d->vgic_redist_base + redist_size <= d->vgic_dist_base)
		return true;

	return false;
}

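/*
 * Final part of VGICv3 setup: once userland has set the base addresses
 * and initialized the VGIC, check them and register the distributor
 * MMIO regions, marking the device as ready.
 */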
int vgic_v3_map_resources(struct kvm *kvm)
{
	int ret = 0;
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		goto out;
	}

	dist->ready = true;

out:
	return ret;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

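/*
 * Command line knobs: force trapping of the group-0, group-1 and common
 * GICv3 system registers, and opt in to GICv4 support.
 */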
static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Returns 0 if a GICv3 has been found, returns an error code otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
	int ret;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_info("GICv4 support %sabled\n",
			 gicv4_enable ? "en" : "dis");
	}

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
		kvm_vgic_global_state.vcpu_base = 0;
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

#ifdef CONFIG_ARM64
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}
#endif

	if (group0_trap || group1_trap || common_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C" : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}

void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
}

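/*
 * Save the VMCR back from the hardware when the vcpu is put; see the
 * comment in vgic_v3_load() for when the shadow value is live.
 */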
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
}