// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

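/*
 * The GICH list registers live in the hyp-mapped vctrl (GICH) frame,
 * starting at GICH_LR0 with one 32-bit register per LR, hence the
 * "lr * 4" byte offset.
 */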
static inline void vgic_v2_write_lr(int lr, u32 val)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;

	writel_relaxed(val, base + GICH_LR0 + (lr * 4));
}

void vgic_v2_init_lrs(void)
{
	int i;

	for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
		vgic_v2_write_lr(i, 0);
}

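/*
 * Setting GICH_HCR.UIE requests an underflow maintenance interrupt once
 * none or only one of the list registers hold a valid entry. This is
 * used when the ap_list contains more interrupts than fit into the LRs,
 * so the remainder can be injected later.
 */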
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	cpuif->vgic_hcr |= GICH_HCR_UIE;
}

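/*
 * An LR signals its EOI maintenance interrupt when it has become
 * invalid (neither pending nor active), had EOI notification requested,
 * and is not linked to a HW interrupt.
 */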
static bool lr_signals_eoi_mi(u32 lr_val)
{
	return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
	       !(lr_val & GICH_LR_HW);
}

/*
 * Transfer the content of the LRs back into the corresponding ap_list:
 * - active bit is transferred as is
 * - pending bit is
 *   - transferred as is in case of edge sensitive IRQs
 *   - set to the line-level (resample time) for level sensitive IRQs
 */
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

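	/*
	 * Drop any underflow request; the flush path will raise it again
	 * if the ap_list still holds more interrupts than there are LRs.
	 */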
	cpuif->vgic_hcr &= ~GICH_HCR_UIE;

	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
		u32 val = cpuif->vgic_lr[lr];
		u32 cpuid, intid = val & GICH_LR_VIRTUALID;
		struct vgic_irq *irq;

		/* Extract the source vCPU id from the LR */
		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
		cpuid &= 7;

		/* Notify fds when the guest EOI'ed a level-triggered SPI */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);

		if (irq->active && vgic_irq_is_sgi(intid))
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & GICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (vgic_irq_is_sgi(intid))
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
			irq->pending_latch = false;

		/*
		 * Level-triggered mapped IRQs are special because we only
		 * observe rising edges as input to the VGIC.
		 *
		 * If the guest never acked the interrupt we have to sample
		 * the physical line and set the line level, because the
		 * device state could have changed or we simply need to
		 * process the still pending interrupt later.
		 *
		 * If this causes us to lower the level, we have to also clear
		 * the physical active state, since we will otherwise never be
		 * told when the interrupt becomes asserted again.
		 */
		if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT)) {
			irq->line_level = vgic_get_phys_line_level(irq);

			if (!irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}
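
	/* All LR state is back in the ap_list; repopulate on next entry. */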
	vgic_cpu->used_lrs = 0;
}

/*
 * Populates the particular LR with the state of a given IRQ:
 * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 val = irq->intid;
	bool allow_pending = true;

	if (irq->active) {
		val |= GICH_LR_ACTIVE_BIT;
		if (vgic_irq_is_sgi(irq->intid))
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= GICH_LR_EOI;
		}
	}

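	/*
	 * Propagate the interrupt's group: Group 1 virtual interrupts are
	 * signalled as IRQs, while Group 0 can be delivered as FIQs when
	 * the guest enables FIQEn in its CPU interface.
	 */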
	if (irq->group)
		val |= GICH_LR_GROUP1;

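	/*
	 * A HW interrupt is linked to a physical INTID: the guest's EOI is
	 * then forwarded to the physical distributor, deactivating the
	 * physical interrupt without hypervisor intervention.
	 */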
	if (irq->hw) {
		val |= GICH_LR_HW;
		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= GICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= GICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid)) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= GICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v2_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT))
		irq->line_level = false;

	/* The GICv2 LR only holds five bits of priority. */
	val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}

void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}

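/*
 * GICH_VMCR shadows the guest-visible CPU interface state (group
 * enables, binary points, priority mask, EOI mode); the two helpers
 * below pack and unpack it to and from the generic struct vgic_vmcr.
 */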
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
		GICH_VMCR_ENABLE_GRP0_MASK;
	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
		GICH_VMCR_ENABLE_GRP1_MASK;
	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
		GICH_VMCR_ACK_CTL_MASK;
	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
		GICH_VMCR_FIQ_EN_MASK;
	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
		GICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
		GICH_VMCR_EOI_MODE_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
		GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
		GICH_VMCR_BINPOINT_MASK;
	vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
		 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
		GICH_VMCR_ENABLE_GRP0_SHIFT;
	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
		GICH_VMCR_ENABLE_GRP1_SHIFT;
	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
		GICH_VMCR_ACK_CTL_SHIFT;
	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
		GICH_VMCR_FIQ_EN_SHIFT;
	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
		GICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
		GICH_VMCR_EOI_MODE_SHIFT;

	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
		GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
		GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
		      GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}

void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
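	/* "base + size < base" can only happen if the gpa_t wrapped around. */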
	if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
		return false;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
		return false;

	if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
		return true;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
		return true;

	return false;
}

int vgic_v2_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_err("Need to set vgic cpu and dist addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
		kvm_err("VGIC CPU and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to initialize VGIC dynamic data structures\n");
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
	if (ret) {
		kvm_err("Unable to register VGIC MMIO regions\n");
		goto out;
	}

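	/*
	 * Unless GICV accesses are trapped, map the physical GICV frame
	 * straight into the guest's stage-2 at the GICC base address, so
	 * that guest CPU interface accesses never exit.
	 */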
	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
					    kvm_vgic_global_state.vcpu_base,
					    KVM_VGIC_V2_CPU_SIZE, true);
		if (ret) {
			kvm_err("Unable to remap VGIC CPU to VCPU\n");
			goto out;
		}
	}

	dist->ready = true;

out:
	return ret;
}

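/*
 * When this static key is enabled, guest accesses to the GICV region
 * trap and are emulated via the hyp mapping set up in vgic_v2_probe(),
 * instead of the region being mapped directly into the guest.
 */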
DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);

/**
 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller
 * @info: pointer to the GIC KVM info structure
 *
 * Returns 0 if a GICv2 has been found, an error code otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
	int ret;
	u32 vtr;

	if (!info->vctrl.start) {
		kvm_err("GICH not present in the firmware table\n");
		return -ENXIO;
	}

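	/*
	 * If the GICV frame is not page-aligned (or not a multiple of the
	 * page size), mapping it into the guest at page granularity would
	 * also expose whatever else shares those pages. Fall back to
	 * trapping and emulating guest CPU interface accesses instead.
	 */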
	if (!PAGE_ALIGNED(info->vcpu.start) ||
	    !PAGE_ALIGNED(resource_size(&info->vcpu))) {
		kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");

		ret = create_hyp_io_mappings(info->vcpu.start,
					     resource_size(&info->vcpu),
					     &kvm_vgic_global_state.vcpu_base_va,
					     &kvm_vgic_global_state.vcpu_hyp_va);
		if (ret) {
			kvm_err("Cannot map GICV into hyp\n");
			goto out;
		}

		static_branch_enable(&vgic_v2_cpuif_trap);
	}

	ret = create_hyp_io_mappings(info->vctrl.start,
				     resource_size(&info->vctrl),
				     &kvm_vgic_global_state.vctrl_base,
				     &kvm_vgic_global_state.vctrl_hyp);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out;
	}

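	/* GICH_VTR.ListRegs encodes the number of implemented LRs minus one. */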
	vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
	kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
	if (ret) {
		kvm_err("Cannot register GICv2 KVM device\n");
		goto out;
	}

	kvm_vgic_global_state.can_emulate_gicv2 = true;
	kvm_vgic_global_state.vcpu_base = info->vcpu.start;
	kvm_vgic_global_state.type = VGIC_V2;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

	kvm_debug("vgic-v2@%llx\n", info->vctrl.start);

	return 0;
out:
	if (kvm_vgic_global_state.vctrl_base)
		iounmap(kvm_vgic_global_state.vctrl_base);
	if (kvm_vgic_global_state.vcpu_base_va)
		iounmap(kvm_vgic_global_state.vcpu_base_va);

	return ret;
}

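/*
 * GICH_ELRSRn is a bitmap of empty list registers: a set bit means the
 * LR no longer holds a valid interrupt, so its state bits can simply be
 * cleared locally instead of reading the LR back over MMIO.
 */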
static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u64 elrsr;
	int i;

	elrsr = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(used_lrs > 32))
		elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;

	for (i = 0; i < used_lrs; i++) {
		if (elrsr & (1UL << i))
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
		else
			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}

void vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	if (!base)
		return;

	if (used_lrs) {
		save_lrs(vcpu, base);
		writel_relaxed(0, base + GICH_HCR);
	}
}

void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	if (!base)
		return;

	if (used_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		for (i = 0; i < used_lrs; i++) {
			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}
}

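/*
 * GICH_VMCR and GICH_APR hold live state while the vcpu runs;
 * vgic_v2_load() and vgic_v2_put() move that state between the shadow
 * copy in struct vgic_v2_cpu_if and the hardware on vcpu load and put.
 */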
void vgic_v2_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	writel_relaxed(cpu_if->vgic_vmcr,
		       kvm_vgic_global_state.vctrl_base + GICH_VMCR);
	writel_relaxed(cpu_if->vgic_apr,
		       kvm_vgic_global_state.vctrl_base + GICH_APR);
}

void vgic_v2_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}