/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

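/*
 * On VHE systems (ARM64_HAS_VIRT_HOST_EXTN) the kernel runs at EL2 and
 * FP/SIMD trapping is controlled via CPACR_EL1; on non-VHE systems it
 * is controlled via CPTR_EL2. Operations that differ between the two
 * therefore come in a *_vhe and a *_nvhe flavour, selected at runtime
 * by hyp_alternate_select().
 */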
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

static hyp_alternate_select(__fpsimd_is_enabled,
			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

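/*
 * hyp_alternate_select() makes __fpsimd_is_enabled() return a pointer
 * to one of the two variants above, patched in at boot based on the
 * ARM64_HAS_VIRT_HOST_EXTN capability; hence the double call
 * __fpsimd_is_enabled()().
 */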
bool __hyp_text __fpsimd_enabled(void)
{
	return __fpsimd_is_enabled()();
}

static void __hyp_text __activate_traps_vhe(void)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_FPEN;
	write_sysreg(val, cpacr_el1);

	write_sysreg(__kvm_hyp_vector, vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
	u64 val;

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
	write_sysreg(val, cptr_el2);
}

static hyp_alternate_select(__activate_traps_arch,
			    __activate_traps_nvhe, __activate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 */
	val = vcpu->arch.hcr_el2;
	if (!(val & HCR_RW)) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
	write_sysreg(val, hcr_el2);
	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/* Make sure we trap PMU access from EL0 to EL2 */
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
	__activate_traps_arch()();
}

static void __hyp_text __deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */

	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	__deactivate_traps_arch()();
	write_sysreg(0, hstr_el2);
	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
	write_sysreg(0, pmuserenr_el0);
}

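/*
 * VTTBR_EL2 holds the VMID and the base of the stage-2 page tables.
 * Loading it below makes the guest's stage-2 translation active; a
 * zero VTTBR_EL2 (see __deactivate_vm() and __hyp_panic()) is used as
 * the "no guest loaded" marker.
 */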
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

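/*
 * Hosts with a system-register GIC CPU interface
 * (ARM64_HAS_SYSREG_GIC_CPUIF) use the GICv3 save/restore routines;
 * everything else falls back to the MMIO-based GICv2 ones.
 */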
static hyp_alternate_select(__vgic_call_save_state,
			    __vgic_v2_save_state, __vgic_v3_save_state,
			    ARM64_HAS_SYSREG_GIC_CPUIF);

static hyp_alternate_select(__vgic_call_restore_state,
			    __vgic_v2_restore_state, __vgic_v3_restore_state,
			    ARM64_HAS_SYSREG_GIC_CPUIF);

static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	__vgic_call_save_state()(vcpu);
	write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(hcr_el2);
	val |= HCR_INT_OVERRIDE;
	val |= vcpu->arch.irq_lines;
	write_sysreg(val, hcr_el2);

	__vgic_call_restore_state()(vcpu);
}

static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

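/*
 * __check_arm_834220()() returns true only on cores affected by
 * erratum 834220 (Cortex-A57), where HPFAR_EL2 can be invalid for some
 * stage-2 faults; the constant-returning helpers above exist purely to
 * feed hyp_alternate_select().
 */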
static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);

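/*
 * PAR_EL1 reports the physical address in bits [47:12] (with bit 0 set
 * on an aborted translation), while HPFAR_EL2 wants FIPA[47:12] in
 * bits [39:4]; the shift/mask at the end of this function converts
 * between the two layouts.
 */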
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr = read_sysreg_el2(esr);
	u8 ec = ESR_ELx_EC(esr);
	u64 hpfar, far;

	vcpu->arch.fault.esr_el2 = esr;

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases are true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries errata 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

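/*
 * After emulating a trapped access we must advance the guest PC past
 * the faulting instruction ourselves: by 4 bytes for AArch64, or via
 * kvm_skip_instr32() for AArch32, which also deals with 16-bit Thumb
 * instructions and the IT state in SPSR.
 */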
static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}

int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);
	write_sysreg(vcpu, tpidr_el2);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_host_state(host_ctxt);
	__debug_cond_save_host_state(vcpu);

	__activate_traps(vcpu);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_restore_state(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_guest_state(guest_ctxt);
	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		goto again;

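	/*
	 * The vgic_v2_cpuif_trap static key is set when the host could
	 * not safely map the GICV interface into the guest; valid GICV
	 * accesses then trap here and are emulated at EL2, after which
	 * the faulting instruction is skipped and the guest re-entered.
	 */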
	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid && __vgic_v2_perform_cpuif_access(vcpu)) {
			__skip_instr(vcpu);
			goto again;
		}
	}

	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_save_state(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_host_state(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}

	__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
	__debug_cond_restore_host_state(vcpu);

	return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
{
	unsigned long str_va;

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * location.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par,
		       (void *)read_sysreg(tpidr_el2));
}

static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
{
	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par,
	      (void *)read_sysreg(tpidr_el2));
}

static hyp_alternate_select(__hyp_call_panic,
			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

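/*
 * A non-zero VTTBR_EL2 means we panicked with a guest context loaded;
 * in that case, restore enough host state for panic() to produce
 * usable output before dying.
 */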
void __hyp_text __noreturn __hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (read_sysreg(vttbr_el2)) {
		struct kvm_vcpu *vcpu;
		struct kvm_cpu_context *host_ctxt;

		vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_call_panic()(spsr, elr, par);

	unreachable();
}