/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_perf_event.h>
#include <asm/sysreg.h>

#define __hyp_text __section(.hyp.text) notrace

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
	return v;
}

#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
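
/*
 * Illustrative use (added commentary, not part of the original header):
 * kernel pointers must be converted before code running at EL2
 * dereferences them, e.g. in a world-switch-style path:
 *
 *	struct kvm_cpu_context *host_ctxt;
 *
 *	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
 *
 * On VHE systems the "and" above is patched to a "nop": the kernel
 * already runs at EL2, so its addresses are usable as-is.
 */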

static inline unsigned long __hyp_kern_va(unsigned long v)
{
	u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET;
	asm volatile(ALTERNATIVE("add %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v) : "r" (offset));
	return v;
}

#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v)))
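
/*
 * Added commentary (not part of the original header): hyp_kern_va() is
 * the inverse mapping, for the rare cases where EL2 code must hand an
 * address back to the kernel, e.g. translating a string pointer for
 * the hyp panic path. On VHE the "add" is likewise patched to a "nop".
 */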

#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
					 "mrs_s %0, " __stringify(r##vh),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
					 "msr_s " __stringify(r##vh) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : : "rZ" (__val));				\
	} while (0)

/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding.
 */
#define read_sysreg_el2(r)						\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\
					 "mrs %0, " __stringify(r##_EL1),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_el2(v,r)						\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\
					 "msr " __stringify(r##_EL1) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : : "rZ" (__val));				\
	} while (0)

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
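
/*
 * Illustrative use (added commentary, not part of the original header):
 *
 *	ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
 *	write_sysreg_el2(*vcpu_pc(vcpu), elr);
 *
 * On non-VHE these access ELR_EL1 and ELR_EL2 directly. On VHE, where
 * the kernel runs at EL2 with HCR_EL2.E2H set, the *_EL1 register
 * names are re-routed to the EL2 registers, so the patched
 * alternatives use the ELR_EL12 encoding (below) to reach the real
 * EL1 register, and the plain ELR_EL1 encoding to reach ELR_EL2.
 */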

/* The VHE specific system registers and their encoding */
#define sctlr_EL12              sys_reg(3, 5, 1, 0, 0)
#define cpacr_EL12              sys_reg(3, 5, 1, 0, 2)
#define ttbr0_EL12              sys_reg(3, 5, 2, 0, 0)
#define ttbr1_EL12              sys_reg(3, 5, 2, 0, 1)
#define tcr_EL12                sys_reg(3, 5, 2, 0, 2)
#define afsr0_EL12              sys_reg(3, 5, 5, 1, 0)
#define afsr1_EL12              sys_reg(3, 5, 5, 1, 1)
#define esr_EL12                sys_reg(3, 5, 5, 2, 0)
#define far_EL12                sys_reg(3, 5, 6, 0, 0)
#define mair_EL12               sys_reg(3, 5, 10, 2, 0)
#define amair_EL12              sys_reg(3, 5, 10, 3, 0)
#define vbar_EL12               sys_reg(3, 5, 12, 0, 0)
#define contextidr_EL12         sys_reg(3, 5, 13, 0, 1)
#define cntkctl_EL12            sys_reg(3, 5, 14, 1, 0)
#define cntp_tval_EL02          sys_reg(3, 5, 14, 2, 0)
#define cntp_ctl_EL02           sys_reg(3, 5, 14, 2, 1)
#define cntp_cval_EL02          sys_reg(3, 5, 14, 2, 2)
#define cntv_tval_EL02          sys_reg(3, 5, 14, 3, 0)
#define cntv_ctl_EL02           sys_reg(3, 5, 14, 3, 1)
#define cntv_cval_EL02          sys_reg(3, 5, 14, 3, 2)
#define spsr_EL12               sys_reg(3, 5, 4, 0, 0)
#define elr_EL12                sys_reg(3, 5, 4, 0, 1)
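
/*
 * Note (added commentary, not part of the original header): sys_reg()
 * packs (op0, op1, CRn, CRm, op2), and op0=3, op1=5 selects the
 * architectural *_EL12/*_EL02 space; e.g. sctlr_EL12 above is the
 * S3_5_C1_C0_0 encoding emitted by the "mrs_s"/"msr_s" alternatives
 * in the accessors.
 */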

/**
 * hyp_alternate_select - Generates patchable code sequences that are
 * used to switch between two implementations of a function, depending
 * on the availability of a feature.
 *
 * @fname: a symbol name that will be defined as a function returning a
 * function pointer whose type will match @orig and @alt
 * @orig: A pointer to the default function, as returned by @fname when
 * @cond doesn't hold
 * @alt: A pointer to the alternate function, as returned by @fname
 * when @cond holds
 * @cond: a CPU feature (as described in asm/cpufeature.h)
 */
#define hyp_alternate_select(fname, orig, alt, cond)			\
typeof(orig) * __hyp_text fname(void)					\
{									\
	typeof(alt) *val = orig;					\
	asm volatile(ALTERNATIVE("nop		\n",			\
				 "mov	%0, %1	\n",			\
				 cond)					\
		     : "+r" (val) : "r" (alt));				\
	return val;							\
}
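
/*
 * Illustrative use (added commentary, not part of the original header):
 * a caller defines the selector once and then calls through it, letting
 * the alternatives framework pick the implementation at boot, e.g.
 *
 *	static hyp_alternate_select(__vgic_call_save_state,
 *				    __vgic_v2_save_state,
 *				    __vgic_v3_save_state,
 *				    ARM64_HAS_SYSREG_GIC_CPUIF);
 *
 *	__vgic_call_save_state()(vcpu);
 */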

void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);

void __timer_save_state(struct kvm_vcpu *vcpu);
void __timer_restore_state(struct kvm_vcpu *vcpu);

void __sysreg_save_host_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt);
void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt);
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);

void __debug_save_state(struct kvm_vcpu *vcpu,
			struct kvm_guest_debug_arch *dbg,
			struct kvm_cpu_context *ctxt);
void __debug_restore_state(struct kvm_vcpu *vcpu,
			   struct kvm_guest_debug_arch *dbg,
			   struct kvm_cpu_context *ctxt);
void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
bool __fpsimd_enabled(void);

u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
void __noreturn __hyp_do_panic(unsigned long, ...);
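
/*
 * Illustrative use (a sketch, not part of the original header): a
 * world-switch built from these pieces looks roughly like
 *
 *	__sysreg_save_host_state(host_ctxt);
 *	__sysreg_restore_guest_state(guest_ctxt);
 *	exit_code = __guest_enter(vcpu, host_ctxt);
 *	__sysreg_save_guest_state(guest_ctxt);
 *	__sysreg_restore_host_state(host_ctxt);
 *
 * where __guest_enter() returns the exit code that the run loop then
 * decodes to decide whether to return to the host.
 */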

#endif /* __ARM64_KVM_HYP_H__ */