/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

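/* Write one list register in the GICH MMIO frame; LRs are 4 bytes apart. */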
static inline void vgic_v2_write_lr(int lr, u32 val)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;

	writel_relaxed(val, base + GICH_LR0 + (lr * 4));
}

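/* Zero every implemented list register before first use. */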
void vgic_v2_init_lrs(void)
{
	int i;

	for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
		vgic_v2_write_lr(i, 0);
}

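/*
 * Ask for a maintenance interrupt once no list register holds a
 * pending interrupt (GICH_HCR.NPIE).
 */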
void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	cpuif->vgic_hcr |= GICH_HCR_NPIE;
}

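/*
 * Ask for a maintenance interrupt on list register underflow, i.e.
 * when at most one LR entry is still valid (GICH_HCR.UIE).
 */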
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	cpuif->vgic_hcr |= GICH_HCR_UIE;
}

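/*
 * An LR signalled an EOI maintenance interrupt if it is now invalid
 * (neither pending nor active), requested EOI notification, and is not
 * mapped to a hardware interrupt.
 */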
static bool lr_signals_eoi_mi(u32 lr_val)
{
	return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
	       !(lr_val & GICH_LR_HW);
}

/*
 * transfer the content of the LRs back into the corresponding ap_list:
 * - active bit is transferred as is
 * - pending bit is
 *   - transferred as is in case of edge sensitive IRQs
 *   - set to the line-level (resample time) for level sensitive IRQs
 */
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
	int lr;
	unsigned long flags;

	cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);

	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
		u32 val = cpuif->vgic_lr[lr];
		u32 intid = val & GICH_LR_VIRTUALID;
		struct vgic_irq *irq;

		/* Notify fds when the guest EOI'ed a level-triggered SPI */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

		spin_lock_irqsave(&irq->irq_lock, flags);

		/* Always preserve the active bit */
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & GICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (vgic_irq_is_sgi(intid)) {
				u32 cpuid = val & GICH_LR_PHYSID_CPUID;

				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
				irq->source |= (1 << cpuid);
			}
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 * Always regenerate the pending state.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL) {
			if (!(val & GICH_LR_PENDING_BIT))
				irq->pending_latch = false;
		}

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	vgic_cpu->used_lrs = 0;
}

/*
 * Populates the particular LR with the state of a given IRQ:
 * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 val = irq->intid;

	if (irq_is_pending(irq)) {
		val |= GICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid)) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source)
				irq->pending_latch = true;
		}
	}

	if (irq->active)
		val |= GICH_LR_ACTIVE_BIT;

	if (irq->hw) {
		val |= GICH_LR_HW;
		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active && irq_is_pending(irq))
			val &= ~GICH_LR_PENDING_BIT;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL)
			val |= GICH_LR_EOI;
	}

	/* The GICv2 LR only holds five bits of priority. */
	val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}

void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}

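/*
 * Pack the architecture-independent struct vgic_vmcr into the GICH_VMCR
 * layout. The priority mask is kept in vmcrp->pmr as a full 8-bit GICV
 * priority, so only its upper bits fit in the GICH_VMCR primask field.
 */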
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
		GICH_VMCR_ENABLE_GRP0_MASK;
	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
		GICH_VMCR_ENABLE_GRP1_MASK;
	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
		GICH_VMCR_ACK_CTL_MASK;
	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
		GICH_VMCR_FIQ_EN_MASK;
	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
		GICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
		GICH_VMCR_EOI_MODE_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
		GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
		GICH_VMCR_BINPOINT_MASK;
	vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
		 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

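/* Unpack the shadow GICH_VMCR value; the inverse of vgic_v2_set_vmcr(). */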
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
		GICH_VMCR_ENABLE_GRP0_SHIFT;
	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
		GICH_VMCR_ENABLE_GRP1_SHIFT;
	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
		GICH_VMCR_ACK_CTL_SHIFT;
	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
		GICH_VMCR_FIQ_EN_SHIFT;
	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
		GICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
		GICH_VMCR_EOI_MODE_SHIFT;

	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
			GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
			GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
			GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}

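/* Reset the shadow CPU interface state and enable the virtual interface. */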
void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
	vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr = ~0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
	if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
		return false;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
		return false;

	if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
		return true;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
		return true;

	return false;
}

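/*
 * Validate and wire up the guest-visible GICv2: check the distributor
 * and CPU interface base addresses, initialize the vgic if needed,
 * register the distributor MMIO regions and, unless GICV accesses are
 * trapped, map the host GICV frame into the guest at the CPU interface
 * base.
 */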
int vgic_v2_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
		kvm_err("VGIC CPU and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to initialize VGIC dynamic data structures\n");
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
	if (ret) {
		kvm_err("Unable to register VGIC MMIO regions\n");
		goto out;
	}

	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
					    kvm_vgic_global_state.vcpu_base,
					    KVM_VGIC_V2_CPU_SIZE, true);
		if (ret) {
			kvm_err("Unable to remap VGIC CPU to VCPU\n");
			goto out;
		}
	}

	dist->ready = true;

out:
	return ret;
}

DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);

/**
 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Returns 0 if a GICv2 has been found, returns an error code otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
	int ret;
	u32 vtr;

	if (!info->vctrl.start) {
		kvm_err("GICH not present in the firmware table\n");
		return -ENXIO;
	}

	if (!PAGE_ALIGNED(info->vcpu.start) ||
	    !PAGE_ALIGNED(resource_size(&info->vcpu))) {
		kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");
		kvm_vgic_global_state.vcpu_base_va = ioremap(info->vcpu.start,
							     resource_size(&info->vcpu));
		if (!kvm_vgic_global_state.vcpu_base_va) {
			kvm_err("Cannot ioremap GICV\n");
			return -ENOMEM;
		}

		ret = create_hyp_io_mappings(kvm_vgic_global_state.vcpu_base_va,
					     kvm_vgic_global_state.vcpu_base_va + resource_size(&info->vcpu),
					     info->vcpu.start);
		if (ret) {
			kvm_err("Cannot map GICV into hyp\n");
			goto out;
		}

		static_branch_enable(&vgic_v2_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = ioremap(info->vctrl.start,
						   resource_size(&info->vctrl));
	if (!kvm_vgic_global_state.vctrl_base) {
		kvm_err("Cannot ioremap GICH\n");
		ret = -ENOMEM;
		goto out;
	}

	vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
	kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;

	ret = create_hyp_io_mappings(kvm_vgic_global_state.vctrl_base,
				     kvm_vgic_global_state.vctrl_base +
				     resource_size(&info->vctrl),
				     info->vctrl.start);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out;
	}

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
	if (ret) {
		kvm_err("Cannot register GICv2 KVM device\n");
		goto out;
	}

	kvm_vgic_global_state.can_emulate_gicv2 = true;
	kvm_vgic_global_state.vcpu_base = info->vcpu.start;
	kvm_vgic_global_state.type = VGIC_V2;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

	kvm_debug("vgic-v2@%llx\n", info->vctrl.start);

	return 0;
out:
	if (kvm_vgic_global_state.vctrl_base)
		iounmap(kvm_vgic_global_state.vctrl_base);
	if (kvm_vgic_global_state.vcpu_base_va)
		iounmap(kvm_vgic_global_state.vcpu_base_va);

	return ret;
}

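/* Restore the shadow GICH_VMCR to the hardware on vcpu load. */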
void vgic_v2_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;

	writel_relaxed(cpu_if->vgic_vmcr, vgic->vctrl_base + GICH_VMCR);
}

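/* Snapshot the hardware GICH_VMCR back into the shadow copy. */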
void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;

	cpu_if->vgic_vmcr = readl_relaxed(vgic->vctrl_base + GICH_VMCR);
}

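/*
 * Save the CPU interface view on vcpu put: sync GICH_VMCR and read back
 * the active priority register (GICH_APR).
 */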
void vgic_v2_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;

	vgic_v2_vmcr_sync(vcpu);
	cpu_if->vgic_apr = readl_relaxed(vgic->vctrl_base + GICH_APR);
}