/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))

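/*
 * The ICH_LRn_EL2 registers cannot be indexed at runtime, so access to
 * a given List Register goes through a switch on the LR number.
 */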
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}

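/*
 * Work out whether a maintenance interrupt is expected for this run,
 * and only then snapshot ICH_MISR_EL2/ICH_EISR_EL2.
 */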
static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	int i;
	bool expect_mi;

	expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE);

	for (i = 0; i < nr_lr; i++) {
		if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
			continue;

		expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
			      (cpu_if->vgic_lr[i] & ICH_LR_EOI));
	}

	if (expect_mi) {
		cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);

		if (cpu_if->vgic_misr & ICH_MISR_EOI)
			cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
		else
			cpu_if->vgic_eisr = 0;
	} else {
		cpu_if->vgic_misr = 0;
		cpu_if->vgic_eisr = 0;
	}
}

static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

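/*
 * Save the guest's vGIC CPU interface state on exit: VMCR, any live
 * LRs, the maintenance interrupt state and the active priority
 * registers, then re-enable the host's system register interface.
 */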
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface.
	 */
	if (!cpu_if->vgic_sre)
		dsb(st);

	cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);

	if (vcpu->arch.vgic_cpu.live_lrs) {
		int i;
		u32 max_lr_idx, nr_pre_bits;

		cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);

		write_gicreg(0, ICH_HCR_EL2);
		val = read_gicreg(ICH_VTR_EL2);
		max_lr_idx = vtr_to_max_lr_idx(val);
		nr_pre_bits = vtr_to_nr_pre_bits(val);

		save_maint_int_state(vcpu, max_lr_idx + 1);

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
				continue;

			if (cpu_if->vgic_elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
			cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
		case 6:
			cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
		default:
			cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
		}

		switch (nr_pre_bits) {
		case 7:
			cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
			cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
		case 6:
			cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
		default:
			cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
		}

		vcpu->arch.vgic_cpu.live_lrs = 0;
	} else {
		if (static_branch_unlikely(&vgic_v3_cpuif_trap))
			write_gicreg(0, ICH_HCR_EL2);

		cpu_if->vgic_misr  = 0;
		cpu_if->vgic_eisr  = 0;
		cpu_if->vgic_elrsr = 0xffff;
		cpu_if->vgic_ap0r[0] = 0;
		cpu_if->vgic_ap0r[1] = 0;
		cpu_if->vgic_ap0r[2] = 0;
		cpu_if->vgic_ap0r[3] = 0;
		cpu_if->vgic_ap1r[0] = 0;
		cpu_if->vgic_ap1r[1] = 0;
		cpu_if->vgic_ap1r[2] = 0;
		cpu_if->vgic_ap1r[3] = 0;
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}
}

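/*
 * Restore the guest's vGIC CPU interface state on entry: ICC_SRE_EL1
 * first, then VMCR, the active priorities and any LR carrying pending
 * or active state.
 */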
void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;
	u32 max_lr_idx, nr_pre_bits;
	u16 live_lrs = 0;
	int i;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC.
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
	}

	val = read_gicreg(ICH_VTR_EL2);
	max_lr_idx = vtr_to_max_lr_idx(val);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	for (i = 0; i <= max_lr_idx; i++) {
		if (cpu_if->vgic_lr[i] & ICH_LR_STATE)
			live_lrs |= (1 << i);
	}

	write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

	if (live_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
		case 6:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
		default:
			__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
		}

		switch (nr_pre_bits) {
		case 7:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
		case 6:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
		default:
			__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
		}

		for (i = 0; i <= max_lr_idx; i++) {
			if (!(live_lrs & (1 << i)))
				continue;

			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
		}
	} else {
		/*
		 * If we need to trap system registers, we must write
		 * ICH_HCR_EL2 anyway, even if no interrupts are being
		 * injected.
		 */
		if (static_branch_unlikely(&vgic_v3_cpuif_trap))
			write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
	}

	/*
	 * Ensures that the above will have reached the
	 * (re)distributors. This ensures the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (!cpu_if->vgic_sre) {
		isb();
		dsb(sy);
	}
	vcpu->arch.vgic_cpu.live_lrs = live_lrs;

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);
}

void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

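/*
 * Decode the group (0 or 1) of a trapped ICC_* access from the CRm
 * field of the syndrome: the Group-0 registers live at CRm == 8.
 */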
static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff

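/*
 * Scan the in-use LRs for the highest-priority pending interrupt whose
 * group is enabled, returning its index or -1 if none qualifies.
 */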
static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr,
						    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

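/* Find the LR holding the given intid in the active state, if any. */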
static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
					       int intid, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

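/*
 * Compute the current running priority from the lowest set bit across
 * the active priority registers.
 */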
static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val  = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}

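/*
 * Drop the running priority by clearing the lowest set bit across the
 * AP0Rn/AP1Rn registers, returning the priority that was dropped.
 */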
static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

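/*
 * Emulate a read of ICC_IAR{0,1}_EL1: acknowledge the highest-priority
 * pending interrupt if it is allowed to preempt, otherwise return a
 * spurious INTID.
 */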
static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

static void __hyp_text __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}

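/*
 * Emulate a write to ICC_EOIR{0,1}_EL1: drop the running priority and,
 * unless EOImode is set or the INTID maps to an LPI, deactivate the
 * matching LR.
 */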
static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

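/*
 * The ICC_AP0Rn/ICC_AP1Rn accesses are layered on the corresponding
 * ICH_APmRn_EL2 registers, selected by the group of the trapped
 * register.
 */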
static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}

static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

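/*
 * Top-level handler for trapped guest GICv3 sysreg accesses: decode
 * the trap syndrome into a sysreg encoding and dispatch to the
 * matching emulation handler.
 */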
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu))
			return 1;

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case ICC_IAR1_EL1:
		fn = __vgic_v3_read_iar;
		break;
	case ICC_EOIR1_EL1:
		fn = __vgic_v3_write_eoir;
		break;
	case ICC_GRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case ICC_HPPIR1_EL1:
		fn = __vgic_v3_read_hppir;
		break;
	case ICC_GRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);