]>
Commit | Line | Data |
---|---|---|
caab277b | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
c76a0a66 MZ |
2 | /* |
3 | * Copyright (C) 2015 - ARM Ltd | |
4 | * Author: Marc Zyngier <marc.zyngier@arm.com> | |
c76a0a66 MZ |
5 | */ |
6 | ||
7 | #ifndef __ARM64_KVM_HYP_H__ | |
8 | #define __ARM64_KVM_HYP_H__ | |
9 | ||
10 | #include <linux/compiler.h> | |
11 | #include <linux/kvm_host.h> | |
1e4448c5 | 12 | #include <asm/alternative.h> |
e329fb75 | 13 | #include <asm/kvm_mmu.h> |
c76a0a66 MZ |
14 | #include <asm/sysreg.h> |
15 | ||
16 | #define __hyp_text __section(.hyp.text) notrace | |
17 | ||
915ccd1d MZ |
/*
 * Read a register that has two encodings: the nVHE one (r##nvh) and the
 * VHE one (r##vh). The ALTERNATIVE patches in the VHE encoding at boot
 * when ARM64_HAS_VIRT_HOST_EXTN is detected, so the right MRS is used
 * without a runtime branch.
 */
#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),		\
					 __mrs_s("%0", r##vh),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})
27 | ||
/*
 * Write counterpart of read_sysreg_elx(): boot-time ALTERNATIVE selects
 * the nVHE (r##nvh) or VHE (r##vh) MSR encoding. "rZ" lets the compiler
 * use XZR for a zero value instead of burning a register.
 */
#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : : "rZ" (__val));				\
	} while (0)
36 | ||
/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
 */

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
915ccd1d | 49 | |
8c2d146e JM |
50 | /* |
51 | * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the | |
52 | * static inline can allow the compiler to out-of-line this. KVM always wants | |
53 | * the macro version as its always inlined. | |
54 | */ | |
55 | #define __kvm_swab32(x) ___constant_swab32(x) | |
56 | ||
3272f0d0 | 57 | int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu); |
06282fd2 | 58 | |
f68d2b1b MZ |
59 | void __vgic_v3_save_state(struct kvm_vcpu *vcpu); |
60 | void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); | |
2d0e63e0 CD |
61 | void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu); |
62 | void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu); | |
923a2e30 CD |
63 | void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu); |
64 | void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu); | |
59da1cbf | 65 | int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu); |
f68d2b1b | 66 | |
688c50aa CD |
67 | void __timer_enable_traps(struct kvm_vcpu *vcpu); |
68 | void __timer_disable_traps(struct kvm_vcpu *vcpu); | |
1431af36 | 69 | |
4cdecaba CD |
70 | void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt); |
71 | void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt); | |
f837453d CD |
72 | void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt); |
73 | void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt); | |
74 | void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt); | |
75 | void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt); | |
c209ec85 MZ |
76 | void __sysreg32_save_state(struct kvm_vcpu *vcpu); |
77 | void __sysreg32_restore_state(struct kvm_vcpu *vcpu); | |
6d6ec20f | 78 | |
014c4c77 CD |
79 | void __debug_switch_to_guest(struct kvm_vcpu *vcpu); |
80 | void __debug_switch_to_host(struct kvm_vcpu *vcpu); | |
8eb99267 | 81 | |
c13d1683 MZ |
82 | void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); |
83 | void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); | |
c13d1683 | 84 | |
a2465629 CD |
85 | void activate_traps_vhe_load(struct kvm_vcpu *vcpu); |
86 | void deactivate_traps_vhe_put(void); | |
87 | ||
b97b66c1 | 88 | u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt); |
53fd5b64 | 89 | void __noreturn __hyp_do_panic(unsigned long, ...); |
b97b66c1 | 90 | |
9f98ddd6 SP |
91 | /* |
92 | * Must be called from hyp code running at EL2 with an updated VTTBR | |
93 | * and interrupts disabled. | |
94 | */ | |
95 | static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm) | |
96 | { | |
7665f3a8 | 97 | write_sysreg(kvm->arch.vtcr, vtcr_el2); |
e329fb75 | 98 | write_sysreg(kvm_get_vttbr(kvm), vttbr_el2); |
1e4448c5 MZ |
99 | |
100 | /* | |
275fa0ea SP |
101 | * ARM errata 1165522 and 1530923 require the actual execution of the |
102 | * above before we can switch to the EL1/EL0 translation regime used by | |
1e4448c5 MZ |
103 | * the guest. |
104 | */ | |
e85d68fa | 105 | asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE)); |
9f98ddd6 SP |
106 | } |
107 | ||
c76a0a66 MZ |
108 | #endif /* __ARM64_KVM_HYP_H__ */ |
109 |