// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
7 | #include <linux/compiler.h> | |
8 | #include <linux/kvm_host.h> | |
9 | ||
7d826029 | 10 | #include <asm/kprobes.h> |
9d8415d6 | 11 | #include <asm/kvm_asm.h> |
e72341c5 | 12 | #include <asm/kvm_emulate.h> |
13720a56 | 13 | #include <asm/kvm_hyp.h> |
6d6ec20f | 14 | |
/*
 * Non-VHE: Both host and guest must save everything.
 *
 * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
 * pstate, which are handled as part of the el2 return state) on every
 * switch (sp_el0 is being dealt with in the assembly code).
 * tpidr_el0 and tpidrro_el0 only need to be switched when going
 * to host userspace or a different VCPU. EL1 registers only need to be
 * switched when potentially going to run a different VCPU. The latter two
 * classes are handled as part of kvm_arch_vcpu_load and kvm_arch_vcpu_put.
 */
26 | ||
/*
 * Save the state that must be switched on every world switch on both
 * VHE and non-VHE: only mdscr_el1 here (sp_el0 is dealt with in the
 * assembly code, per the comment at the top of this file).
 */
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
}
31 | ||
060701f0 CD |
/*
 * Save the EL0 thread-pointer registers into ctxt. Per the comment at
 * the top of this file, these only need switching when going to host
 * userspace or a different VCPU.
 */
static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
	ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
}
37 | ||
/*
 * Save the full EL1 system register context (translation, fault,
 * vector, timer-control and EL1 stack/return state) into ctxt.
 */
static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
	ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
	ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
	ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
	ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR);
	ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0);
	ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1);
	ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(SYS_TCR);
	ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(SYS_ESR);
	ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(SYS_AFSR0);
	ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(SYS_AFSR1);
	ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(SYS_FAR);
	ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(SYS_MAIR);
	ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(SYS_VBAR);
	ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(SYS_CONTEXTIDR);
	ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(SYS_AMAIR);
	ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(SYS_CNTKCTL);
	ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
	ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);

	/* EL1 exception return state lives in gp_regs, not sys_regs. */
	ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
	ctxt->gp_regs.elr_el1 = read_sysreg_el1(SYS_ELR);
	ctxt->gp_regs.spsr[KVM_SPSR_EL1] = read_sysreg_el1(SYS_SPSR);
}
63 | ||
/*
 * Save the EL2 exception return state: the guest PC and pstate come
 * from ELR_EL2/SPSR_EL2. With the RAS extension, also capture the
 * deferred virtual SError syndrome (VDISR_EL2) into DISR_EL1.
 */
static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
	ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR);
	ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
}
72 | ||
/* Non-VHE: save everything (see the comment at the top of this file). */
void __hyp_text __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_el1_state(ctxt);
	__sysreg_save_common_state(ctxt);
	__sysreg_save_user_state(ctxt);
	__sysreg_save_el2_return_state(ctxt);
}
80 | ||
/*
 * VHE host save: only the common state is needed here; EL1/user state
 * is deferred to kvm_arch_vcpu_load/put (see the file header comment).
 * Not kprobe-able, as it runs on the world-switch path.
 */
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
edef528d | 86 | |
/*
 * VHE guest save: common state plus the EL2 return state (PC/pstate).
 * Not kprobe-able, as it runs on the world-switch path.
 */
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_save_common_state(ctxt);
	__sysreg_save_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
9c6c3568 MZ |
93 | |
/* Restore mdscr_el1, the counterpart of __sysreg_save_common_state(). */
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
}
98 | ||
060701f0 CD |
/* Restore the EL0 thread-pointer registers saved by __sysreg_save_user_state(). */
static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
}
104 | ||
/*
 * Restore the EL1 system register context saved by
 * __sysreg_save_el1_state() (plus vmpidr_el2, which shadows the guest's
 * MPIDR_EL1).
 *
 * On CPUs with the ARM64_WORKAROUND_SPECULATIVE_AT_NVHE erratum,
 * SCTLR_EL1 and TCR_EL1 cannot simply be restored up front: the two
 * guarded branches below carefully order those writes (with
 * TCR.EPD0/EPD1 set, and isb() barriers) so that no speculative
 * AT-driven walk can observe a half-switched S1 context. The
 * guest-vs-host case is told apart via ctxt->__hyp_running_vcpu.
 * Do not reorder any of the writes in this function.
 */
static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
	write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);

	if (!cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
		write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
	} else if (!ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for guest registers, hence the context
		 * test. We're coming from the host, so SCTLR.M is already
		 * set. Pairs with __activate_traps_nvhe().
		 */
		write_sysreg_el1((ctxt->sys_regs[TCR_EL1] |
				  TCR_EPD1_MASK | TCR_EPD0_MASK),
				 SYS_TCR);
		isb();
	}

	write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
	write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR);
	write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0);
	write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1);
	write_sysreg_el1(ctxt->sys_regs[ESR_EL1], SYS_ESR);
	write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], SYS_AFSR0);
	write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], SYS_AFSR1);
	write_sysreg_el1(ctxt->sys_regs[FAR_EL1], SYS_FAR);
	write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], SYS_MAIR);
	write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], SYS_VBAR);
	write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1], SYS_CONTEXTIDR);
	write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], SYS_AMAIR);
	write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], SYS_CNTKCTL);
	write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
	write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE) &&
	    ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for host registers, hence the context
		 * test. Pairs with __deactivate_traps_nvhe().
		 */
		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * deconfigured and disabled. We can now restore the host's
		 * S1 configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
	}

	/* EL1 exception return state, counterpart of the save path. */
	write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
	write_sysreg_el1(ctxt->gp_regs.elr_el1, SYS_ELR);
	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1], SYS_SPSR);
}
162 | ||
/*
 * Program the EL2 return state (ELR_EL2/SPSR_EL2) from the saved guest
 * PC/pstate, sanitizing the target mode first. With the RAS extension,
 * also restore the deferred virtual SError syndrome into VDISR_EL2.
 */
static void __hyp_text
__sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
	u64 pstate = ctxt->gp_regs.regs.pstate;
	u64 mode = pstate & PSR_AA32_MODE_MASK;

	/*
	 * Safety check to ensure we're setting the CPU up to enter the guest
	 * in a less privileged mode.
	 *
	 * If we are attempting a return to EL2 or higher in AArch64 state,
	 * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
	 * we'll take an illegal exception state exception immediately after
	 * the ERET to the guest. Attempts to return to AArch32 Hyp will
	 * result in an illegal exception return because EL2's execution state
	 * is determined by SCR_EL3.RW.
	 */
	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
		pstate = PSR_MODE_EL2h | PSR_IL_BIT;

	write_sysreg_el2(ctxt->gp_regs.regs.pc, SYS_ELR);
	write_sysreg_el2(pstate, SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
}
c209ec85 | 189 | |
/* Non-VHE: restore everything, mirroring __sysreg_save_state_nvhe(). */
void __hyp_text __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_el1_state(ctxt);
	__sysreg_restore_common_state(ctxt);
	__sysreg_restore_user_state(ctxt);
	__sysreg_restore_el2_return_state(ctxt);
}
197 | ||
/*
 * VHE host restore: only the common state; the rest is handled in
 * kvm_arch_vcpu_load/put. Not kprobe-able (world-switch path).
 */
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
edef528d | 203 | |
/*
 * VHE guest restore: common state plus the EL2 return state.
 * Not kprobe-able (world-switch path).
 */
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
	__sysreg_restore_common_state(ctxt);
	__sysreg_restore_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
edef528d | 210 | |
c209ec85 MZ |
/*
 * Save the AArch32-specific register state (banked SPSRs, DACR32_EL2,
 * IFSR32_EL2). No-op unless the vcpu's EL1 is running in 32-bit state.
 */
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
	u64 *spsr, *sysreg;

	if (!vcpu_el1_is_32bit(vcpu))
		return;

	spsr = vcpu->arch.ctxt.gp_regs.spsr;
	sysreg = vcpu->arch.ctxt.sys_regs;

	spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
	spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
	spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
	spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);

	sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
	sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);

	/* DBGVCR32 is only context-switched on VHE or when debug is dirty. */
	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
		sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
}
232 | ||
/*
 * Restore the AArch32-specific register state saved by
 * __sysreg32_save_state(). No-op unless the vcpu's EL1 is 32-bit.
 */
void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
	u64 *spsr, *sysreg;

	if (!vcpu_el1_is_32bit(vcpu))
		return;

	spsr = vcpu->arch.ctxt.gp_regs.spsr;
	sysreg = vcpu->arch.ctxt.sys_regs;

	write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
	write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
	write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
	write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);

	write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
	write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);

	/* Mirrors the DBGVCR32 condition on the save path. */
	if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
		write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
}
4464e210 | 254 | |
bc192cee CD |
/**
 * kvm_vcpu_load_sysregs - Load guest system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Load system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2. This function is called from KVM's vcpu_load() function
 * and loading system register state early avoids having to load them on
 * every entry to the VM.
 */
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
	struct kvm_cpu_context *host_ctxt;

	/* Deferred load only applies to VHE; non-VHE does it at each switch. */
	if (!has_vhe())
		return;

	/* Stash the host's EL0 state before the guest's is loaded. */
	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
	__sysreg_save_user_state(host_ctxt);

	/*
	 * Load guest EL1 and user state
	 *
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_user_state(guest_ctxt);
	__sysreg_restore_el1_state(guest_ctxt);

	/* Tell the rest of KVM the sysregs now live on the CPU. */
	vcpu->arch.sysregs_loaded_on_cpu = true;

	activate_traps_vhe_load(vcpu);
}
291 | ||
/**
 * kvm_vcpu_put_sysregs - Restore host system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Save guest system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2. This function is called from KVM's vcpu_put() function
 * and deferring saving system register state until we're no longer running the
 * VCPU avoids having to save them on every exit from the VM.
 */
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
	struct kvm_cpu_context *host_ctxt;

	/* Deferred save only applies to VHE; see kvm_vcpu_load_sysregs(). */
	if (!has_vhe())
		return;

	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
	deactivate_traps_vhe_put();

	/* Save the guest's EL1, EL0 and (if 32-bit) AArch32 state. */
	__sysreg_save_el1_state(guest_ctxt);
	__sysreg_save_user_state(guest_ctxt);
	__sysreg32_save_state(vcpu);

	/* Restore host user state */
	__sysreg_restore_user_state(host_ctxt);

	vcpu->arch.sysregs_loaded_on_cpu = false;
}
7c36447a WD |
323 | |
/*
 * Set SCTLR_ELx_DSSBS in SCTLR_EL2 via a read-modify-write sequence in
 * inline asm (mrs/orr/msr). The "L" constraint lets the mask be encoded
 * as a logical immediate in the orr instruction.
 */
void __hyp_text __kvm_enable_ssbs(void)
{
	u64 tmp;

	asm volatile(
	"mrs	%0, sctlr_el2\n"
	"orr	%0, %0, %1\n"
	"msr	sctlr_el2, %0"
	: "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
}