/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

static hyp_alternate_select(__fpsimd_is_enabled,
			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

bool __hyp_text __fpsimd_enabled(void)
{
	return __fpsimd_is_enabled()();
}
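/*
 * Note: hyp_alternate_select() defines a small helper (here
 * __fpsimd_is_enabled()) that returns a pointer to either the nVHE or
 * the VHE variant; the choice is patched in at boot via the
 * ARM64_HAS_VIRT_HOST_EXTN capability. That is why callers use the
 * double-call syntax: the first () picks the variant, the second ()
 * invokes it. On nVHE the FP trap lives in CPTR_EL2.TFP, on VHE in
 * CPACR_EL1.FPEN, hence the two implementations above.
 */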
static void __hyp_text __activate_traps_vhe(void)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
static void __hyp_text __activate_traps_nvhe(void)
{
	u64 val;

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP | CPTR_EL2_TZ;
	write_sysreg(val, cptr_el2);
}
static hyp_alternate_select(__activate_traps_arch,
			    __activate_traps_nvhe, __activate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	val = vcpu->arch.hcr_el2;

	if (!(val & HCR_RW) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}

	if (val & HCR_RW) /* for AArch64 only: */
		val |= HCR_TID3; /* TID3: trap feature register accesses */

	write_sysreg(val, hcr_el2);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (val & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
	__activate_traps_arch()();
}
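/*
 * The traps above (HCR_EL2, HSTR_EL2, PMU, MDCR_EL2) are common to both
 * build modes; the final __activate_traps_arch()() call then programs
 * the mode-specific register: CPTR_EL2 on nVHE, CPACR_EL1 on VHE. The
 * 1 << 30 written to FPEXC32_EL2 is the AArch32 FPEXC.EN bit.
 */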
static void __hyp_text __deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}
static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}
static hyp_alternate_select(__deactivate_traps_arch,
			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	__deactivate_traps_arch()();
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}
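/*
 * VTTBR_EL2 carries both the stage-2 page table base and the VMID, so
 * writing kvm->arch.vttbr is what actually switches stage-2
 * translation to this VM; clearing it on the way out leaves the host
 * without an active guest stage-2 context.
 */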
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_save_state(vcpu);
	else
		__vgic_v2_save_state(vcpu);

	write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}
static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(hcr_el2);
	val |= HCR_INT_OVERRIDE;
	val |= vcpu->arch.irq_lines;
	write_sysreg(val, hcr_el2);

	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_restore_state(vcpu);
	else
		__vgic_v2_restore_state(vcpu);
}
static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);
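/*
 * __check_arm_834220()() returns true only on CPUs carrying erratum
 * 834220, where HPFAR_EL2 may be stale for some stage-2 faults. The
 * fault handling below uses it to decide whether to fall back to an
 * AT-based translation of the faulting address.
 */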
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}
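/*
 * The conversion above works because PAR_EL1 reports the output
 * address in bits [47:12] while HPFAR_EL2 wants FIPA[47:12] in bits
 * [39:4]: shifting right by 12 isolates the frame number, the 36-bit
 * mask keeps PA bits 47:12, and the shift left by 4 lines the result
 * up with the HPFAR_EL2 field layout.
 */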
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr, far, hpfar;
	u8 ec;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases are true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries errata 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}
/* Skip an instruction which has been emulated. Returns true if
 * execution can continue or false if we need to exit hyp mode because
 * single-step was in effect.
 */
static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		vcpu->arch.fault.esr_el2 =
			(ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
		return false;
	} else {
		return true;
	}
}
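/*
 * When the vCPU is being single-stepped, skipping the emulated
 * instruction completes the step, so instead of re-entering the guest
 * we synthesize a software-step exception class (ESR_ELx_EC_SOFTSTP_LOW)
 * in fault.esr_el2 and let the normal exit path report it as a debug
 * exit.
 */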
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_const_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}
static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}
static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}
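/*
 * The ARM_SMCCC_ARCH_WORKAROUND_2 call toggles the firmware SSBD
 * mitigation on the current CPU: the second argument is 0 to turn it
 * off before entering a guest that has opted out, and 1 to re-enable
 * it for the host on the way back. Both calls are skipped when this
 * CPU does not require the firmware callback.
 */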
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_host_state(host_ctxt);
	__debug_cond_save_host_state(vcpu);

	__activate_traps(vcpu);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_guest_state(guest_ctxt);
	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

	__set_guest_arch_workaround_state(vcpu);
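	/*
	 * Everything the guest needs is now in place: host state saved,
	 * traps and stage-2 translation active, guest sysregs and debug
	 * state loaded. __guest_enter() below performs the exception
	 * return into the guest and comes back with an exit code once
	 * the guest traps or an interrupt arrives.
	 */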
	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	if (ARM_EXCEPTION_CODE(exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr);
	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		goto again;
	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				if (__skip_instr(vcpu))
					goto again;
				else
					exit_code = ARM_EXCEPTION_TRAP;
			}

			if (ret == -1) {
				/* Promote an illegal access to an
				 * SError. If we would be returning
				 * due to single-step clear the SS
				 * bit so handle_exit knows what to
				 * do after dealing with the error.
				 */
				if (!__skip_instr(vcpu))
					*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
				exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

			/* 0 falls through to be handled out of EL2 */
		}
	}
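	/*
	 * __vgic_v2_perform_cpuif_access() reports 1 when it emulated
	 * the access (so the instruction is skipped and we re-enter the
	 * guest, unless single-stepping), -1 when the access was
	 * illegal (promoted to an SError above), and 0 when the exit
	 * should be handled by the host as a normal trap.
	 */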
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1) {
			if (__skip_instr(vcpu))
				goto again;
			else
				exit_code = ARM_EXCEPTION_TRAP;
		}

		/* 0 falls through to be handled out of EL2 */
	}
	__set_host_arch_workaround_state(vcpu);

	if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
		u32 midr = read_cpuid_id();

		/* Apply BTAC predictors mitigation to all Falkor chips */
		if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
		    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
			__qcom_hyp_sanitize_btac_predictors();
		}
	}
	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_host_state(host_ctxt);

	/*
	 * FP/SIMD state is switched lazily: if the guest never touched
	 * the FP registers, the trap never fired and the hardware still
	 * holds the host's state, so only save/restore when the guest
	 * actually used them.
	 */
	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}

	__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_cond_restore_host_state(vcpu);

	return exit_code;
}
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_vcpu *vcpu)
{
	unsigned long str_va;

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * location.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par, vcpu);
}
static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
					    struct kvm_vcpu *vcpu)
{
	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par, vcpu);
}
static hyp_alternate_select(__hyp_call_panic,
			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu = NULL;

	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (read_sysreg(vttbr_el2)) {
		vcpu = host_ctxt->__hyp_running_vcpu;
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_call_panic()(spsr, elr, par, vcpu);

	unreachable();
}