/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;

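/*
 * NPIE and UIE arm the GICv3 maintenance interrupt: ICH_HCR_EL2.NPIE
 * fires when no List Register is in the pending state, and
 * ICH_HCR_EL2.UIE fires when none, or only one, of the List Registers
 * holds a valid entry. Both give us a kick to refill the LRs when we
 * had more pending interrupts than List Registers available.
 */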
void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_NPIE;
}

void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}

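/*
 * An LR signals an EOI maintenance interrupt when its state bits read
 * as invalid (neither pending nor active), its EOI bit is set, and the
 * interrupt is not hardware-mapped: for HW interrupts, deactivation is
 * forwarded to the physical distributor instead of being trapped.
 */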
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}

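/*
 * Transfer the content of the LRs back into the software model after a
 * guest exit: the active bit is always preserved, the pending bit is
 * preserved for edge interrupts and recomputed for level interrupts,
 * and all LRs are handed back for the next flush.
 */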
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	unsigned long flags;
	int lr;

	cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);

	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		struct vgic_irq *irq;
		u32 intid;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		else
			intid = val & GICH_LR_VIRTUALID;

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		spin_lock_irqsave(&irq->irq_lock, flags);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (vgic_irq_is_sgi(intid) &&
			    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
				u32 cpuid = val & GICH_LR_PHYSID_CPUID;

				cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
				irq->source |= (1 << cpuid);
			}
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 * Always regenerate the pending state.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL) {
			if (!(val & ICH_LR_PENDING_BIT))
				irq->pending_latch = false;
		}

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	vgic_cpu->used_lrs = 0;
}

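/*
 * The reverse of vgic_v3_fold_lr_state: encode a software-modelled
 * interrupt into a List Register (INTID, pending/active state, group,
 * priority, and the physical INTID for hardware-mapped interrupts).
 */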
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;

	if (irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			BUG_ON(!src);
			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source)
				irq->pending_latch = true;
		}
	}

	if (irq->active)
		val |= ICH_LR_ACTIVE_BIT;

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * anyway.
		 */
		if (irq->active && irq_is_pending(irq))
			val &= ~ICH_LR_PENDING_BIT;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL)
			val |= ICH_LR_EOI;
	}

	/*
	 * We currently only support Group1 interrupts, which is a
	 * known defect. This needs to be addressed at some point.
	 */
	if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

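/*
 * vgic_vmcr shadows the guest view of ICH_VMCR_EL2: priority mask,
 * binary points, group enables and, when emulating a GICv2, the
 * GICv2-specific VAckCtl and VFIQEn bits.
 */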
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE==1,
		 * the VFIQEn bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
			ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE==1,
		 * the VFIQEn bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

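/*
 * Reset value advertised in GICR_PENDBASER: normal memory, inner
 * cacheable (read-allocate, write-back), outer cacheability the same
 * as inner, inner shareable. Only the attribute fields are set here;
 * the physical address of the pending table is provided by the guest.
 */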
#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;
	vgic_v3->vgic_elrsr = ~0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
}

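/*
 * The LPI pending table lives in guest RAM at the address programmed
 * into GICR_PENDBASER and holds one pending bit per INTID. Read the
 * bit for this LPI, transfer it into the pending_latch of the software
 * model, and clear the consumed bit in the table.
 */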
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

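/*
 * The dual of vgic_v3_lpi_sync_pending_status: walk all mapped LPIs
 * and write their pending_latch state back into the guest's pending
 * tables, caching the last byte read to avoid redundant guest-memory
 * accesses for neighbouring INTIDs.
 */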
/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 *
 * The kvm lock and all vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int last_byte_offset = -1;
	struct vgic_irq *irq;
	int ret;
	u8 val;

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				return ret;
			last_byte_offset = byte_offset;
		}

		stored = val & (1U << bit_nr);
		if (stored == irq->pending_latch)
			continue;

		if (irq->pending_latch)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE;

	redist_size *= atomic_read(&kvm->online_vcpus);

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_redist_base) &&
	    d->vgic_redist_base + redist_size < d->vgic_redist_base)
		return false;

	/* Both base addresses must be set to check if they overlap */
	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(d->vgic_redist_base))
		return true;

	if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= d->vgic_redist_base)
		return true;
	if (d->vgic_redist_base + redist_size <= d->vgic_dist_base)
		return true;

	return false;
}

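/*
 * Called on first vcpu run: validates that userspace has provided
 * distributor and redistributor base addresses that neither wrap nor
 * overlap, and registers the distributor MMIO device with KVM.
 */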
int vgic_v3_map_resources(struct kvm *kvm)
{
	int ret = 0;
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		ret = -EINVAL;
		goto out;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		goto out;
	}

	dist->ready = true;

out:
	return ret;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

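/*
 * Example: booting with "kvm-arm.vgic_v3_group0_trap=1" on the kernel
 * command line forces trapping of guest Group-0 system register
 * accesses (ICH_HCR_EL2.TALL0) even on CPUs that do not require the
 * workaround, at the cost of some performance.
 */
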
/**
 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if a GICv3 has been found, returns an error code otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
	int ret;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_info("GICv4 support %sabled\n",
			 gicv4_enable ? "en" : "dis");
	}

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
		kvm_vgic_global_state.vcpu_base = 0;
	} else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
		pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
			(unsigned long long)resource_size(&info->vcpu),
			PAGE_SIZE);
		kvm_vgic_global_state.vcpu_base = 0;
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

#ifdef CONFIG_ARM64
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}
#endif

	if (group0_trap || group1_trap || common_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C" : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}

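/*
 * vgic_v3_load/vgic_v3_put run when a vcpu is scheduled in and out.
 * For a GICv3-native guest (SRE set), VMCR_EL2 only needs to move
 * between the CPU and the shadow copy here, instead of on every world
 * switch.
 */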
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
}

void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
}