/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

/*
 * Non-VHE: Both host and guest must save everything.
 *
 * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and pstate,
 * which are handled as part of the el2 return state) on every switch.
 * tpidr_el0 and tpidrro_el0 only need to be switched when going
 * to host userspace or a different VCPU.  EL1 registers only need to be
 * switched when potentially going to run a different VCPU.  The latter two
 * classes are handled as part of kvm_arch_vcpu_load and kvm_arch_vcpu_put.
 */

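/*
 * Save the state that must be handled on every world switch on both VHE
 * and non-VHE systems: the debug control register and sp_el0.
 */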
static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
        ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);

        /*
         * The host arm64 Linux uses sp_el0 to point to 'current' and it must
         * therefore be saved/restored on every entry/exit to/from the guest.
         */
        ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
}

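/*
 * EL0 thread pointer registers: on VHE these only need to be switched when
 * returning to host userspace or when running a different VCPU.
 */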
static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
        ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
        ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
}

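/*
 * Guest/host EL1 state: saved/restored on every world switch on non-VHE,
 * but only around vcpu_load()/vcpu_put() on VHE.
 */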
static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
        ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
        ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr);
        ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
        ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(cpacr);
        ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(ttbr0);
        ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(ttbr1);
        ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(tcr);
        ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(esr);
        ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(afsr0);
        ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(afsr1);
        ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(far);
        ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(mair);
        ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(vbar);
        ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(contextidr);
        ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
        ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
        ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
        ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);

        ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
        ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
        ctxt->gp_regs.spsr[KVM_SPSR_EL1] = read_sysreg_el1(spsr);
}

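/*
 * On a trap from the guest, the guest's PC and PSTATE are held in
 * ELR_EL2/SPSR_EL2 (reached via the el2 accessors), so capture them as
 * part of the exit path; with the RAS extension, also capture the
 * deferred SError state from VDISR_EL2.
 */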
static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
        ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
        ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);

        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
                ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
}

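/*
 * Entry points used by the world-switch code: the non-VHE path saves
 * everything on every switch, while the VHE paths only handle the common
 * and EL2 return state (EL1 and user state are deferred to
 * kvm_vcpu_load_sysregs()/kvm_vcpu_put_sysregs() below).
 */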
void __hyp_text __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_save_el1_state(ctxt);
        __sysreg_save_common_state(ctxt);
        __sysreg_save_user_state(ctxt);
        __sysreg_save_el2_return_state(ctxt);
}

void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_save_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);

void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_save_common_state(ctxt);
        __sysreg_save_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);

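/*
 * The restore helpers below mirror the save helpers above, writing the
 * saved context back into the hardware registers.
 */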
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
        write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);

        /*
         * The host arm64 Linux uses sp_el0 to point to 'current' and it must
         * therefore be saved/restored on every entry/exit to/from the guest.
         */
        write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
}

static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
        write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
        write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
}

static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
        write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
        write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
        write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], sctlr);
        write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
        write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], cpacr);
        write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], ttbr0);
        write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], ttbr1);
        write_sysreg_el1(ctxt->sys_regs[TCR_EL1], tcr);
        write_sysreg_el1(ctxt->sys_regs[ESR_EL1], esr);
        write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], afsr0);
        write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], afsr1);
        write_sysreg_el1(ctxt->sys_regs[FAR_EL1], far);
        write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], mair);
        write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], vbar);
        write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr);
        write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
        write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
        write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
        write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);

        write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
        write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
        write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr);
}

static void __hyp_text
__sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
        u64 pstate = ctxt->gp_regs.regs.pstate;
        u64 mode = pstate & PSR_AA32_MODE_MASK;

        /*
         * Safety check to ensure we're setting the CPU up to enter the guest
         * in a less privileged mode.
         *
         * If we are attempting a return to EL2 or higher in AArch64 state,
         * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
         * we'll take an illegal exception state exception immediately after
         * the ERET to the guest.  Attempts to return to AArch32 Hyp will
         * result in an illegal exception return because EL2's execution state
         * is determined by SCR_EL3.RW.
         */
        if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
                pstate = PSR_MODE_EL2h | PSR_IL_BIT;

        write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
        write_sysreg_el2(pstate, spsr);

        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
                write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
}

void __hyp_text __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_restore_el1_state(ctxt);
        __sysreg_restore_common_state(ctxt);
        __sysreg_restore_user_state(ctxt);
        __sysreg_restore_el2_return_state(ctxt);
}

void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_restore_common_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);

void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
        __sysreg_restore_common_state(ctxt);
        __sysreg_restore_el2_return_state(ctxt);
}
NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);

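/*
 * AArch32 guest state: the banked SPSRs and the 32-bit fault/domain
 * registers only exist for a vcpu whose EL1 runs in AArch32, so these
 * helpers bail out early for 64-bit guests.
 */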
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
        u64 *spsr, *sysreg;

        if (!vcpu_el1_is_32bit(vcpu))
                return;

        spsr = vcpu->arch.ctxt.gp_regs.spsr;
        sysreg = vcpu->arch.ctxt.sys_regs;

        spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
        spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
        spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
        spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);

        sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
        sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);

        if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
                sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
}

void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
        u64 *spsr, *sysreg;

        if (!vcpu_el1_is_32bit(vcpu))
                return;

        spsr = vcpu->arch.ctxt.gp_regs.spsr;
        sysreg = vcpu->arch.ctxt.sys_regs;

        write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
        write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
        write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
        write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);

        write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
        write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);

        if (has_vhe() || vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY)
                write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
}

/**
 * kvm_vcpu_load_sysregs - Load guest system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Load system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2.  This function is called from KVM's vcpu_load() function
 * and loading system register state early avoids having to load them on
 * every entry to the VM.
 */
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
        struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;

        if (!has_vhe())
                return;

        __sysreg_save_user_state(host_ctxt);

        /*
         * Load guest EL1 and user state
         *
         * We must restore the 32-bit state before the sysregs, thanks
         * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
         */
        __sysreg32_restore_state(vcpu);
        __sysreg_restore_user_state(guest_ctxt);
        __sysreg_restore_el1_state(guest_ctxt);

        vcpu->arch.sysregs_loaded_on_cpu = true;

        activate_traps_vhe_load(vcpu);
}

/**
 * kvm_vcpu_put_sysregs - Restore host system registers to the physical CPU
 *
 * @vcpu: The VCPU pointer
 *
 * Save guest system registers that do not affect the host's execution, for
 * example EL1 system registers on a VHE system where the host kernel
 * runs at EL2.  This function is called from KVM's vcpu_put() function
 * and deferring saving system register state until we're no longer running the
 * VCPU avoids having to save them on every exit from the VM.
 */
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
        struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;

        if (!has_vhe())
                return;

        deactivate_traps_vhe_put();

        __sysreg_save_el1_state(guest_ctxt);
        __sysreg_save_user_state(guest_ctxt);
        __sysreg32_save_state(vcpu);

        /* Restore host user state */
        __sysreg_restore_user_state(host_ctxt);

        vcpu->arch.sysregs_loaded_on_cpu = false;
}

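/*
 * Set the DSSBS (Default SSBS) bit in SCTLR_EL2 with a read-modify-write,
 * so that PSTATE.SSBS defaults to set on exceptions taken to EL2, as part
 * of the Speculative Store Bypass Safe handling for the hyp code.
 */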
void __hyp_text __kvm_enable_ssbs(void)
{
        u64 tmp;

        asm volatile(
        "mrs    %0, sctlr_el2\n"
        "orr    %0, %0, %1\n"
        "msr    sctlr_el2, %0"
        : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
}