/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

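/*
 * CPU_XREG_OFFSET(n) yields the byte offset of general-purpose
 * register xN within struct kvm_cpu_context: the GP register file
 * sits at CPU_GP_REGS + CPU_USER_PT_REGS, 8 bytes per 64-bit register.
 */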
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)

	.text
	.pushsection	.hyp.text, "ax"

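/*
 * x19-x28 are the AAPCS64 callee-saved registers; x29 (the frame
 * pointer) and lr are saved alongside them, so a world switch
 * preserves everything the suspended context expects to survive a
 * function call.
 */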
.macro save_callee_saved_regs ctxt
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
ENTRY(__guest_enter)
	// x0: vcpu
	// x1: host context
	// x2-x17: clobbered by macros
	// x18: guest context

	// Store the host regs
	save_callee_saved_regs x1

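	// Point x18 at the guest context embedded in the vcpu
	// (VCPU_CONTEXT is the asm-offsets offset of vcpu->arch.ctxt)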
	add	x18, x0, #VCPU_CONTEXT

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x19-x29, lr
	restore_callee_saved_regs x18

	// Restore guest reg x18
	ldr	x18, [x18, #CPU_XREG_OFFSET(18)]

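	// At this point every GPR holds guest state; the ERET below
	// enters the guest at ELR_EL2, with the mode taken from SPSR_EL2.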
	// Do not touch any register after this!
	eret
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

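	// Switch x1 from the vcpu pointer to the guest context inside it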
	add	x1, x1, #VCPU_CONTEXT

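	// On CPUs with PAN, set PSTATE.PAN again now that we are back
	// from the guest, so hyp code runs with user accesses disabled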
	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x18
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]
	str	x18,      [x1, #CPU_XREG_OFFSET(18)]

	// Store the guest regs x19-x29, lr
	save_callee_saved_regs x1

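	// get_host_ctxt computes the address of this CPU's host context
	// into x2 (x3 is scratch), so the host pointer never has to be
	// stashed on the stack across the guest run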
	get_host_ctxt	x2, x3

	// Now restore the host regs
	restore_callee_saved_regs x2

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb.
	esb
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	// If we have a pending asynchronous abort, now is the
	// time to find out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one!  For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

	dsb	sy		// Synchronize against in-flight ld/st
	nop
	msr	daifclr, #4	// Unmask aborts
alternative_endif

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
	.global	abort_guest_exit_start
abort_guest_exit_start:

	isb

	.global	abort_guest_exit_end
abort_guest_exit_end:

	// If the exception took place, restore the EL1 exception
	// context so that we can report some information.
	// Merge the exception code with the SError pending bit.
	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
ENDPROC(__guest_exit)

ENTRY(__fpsimd_guest_restore)
	// x0: esr
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack
	stp	x2, x3, [sp, #-16]!
	stp	x4, lr, [sp, #-16]!

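	// Stop trapping FP/SIMD accesses: clear CPTR_EL2.TFP on non-VHE
	// systems; with VHE, CPACR_EL1 is in effect at EL2, so set FPEN
	// there instead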
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x2, cptr_el2
	bic	x2, x2, #CPTR_EL2_TFP
	msr	cptr_el2, x2
alternative_else
	mrs	x2, cpacr_el1
	orr	x2, x2, #CPACR_EL1_FPEN
	msr	cpacr_el1, x2
alternative_endif
	isb

	mov	x3, x1

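	// Save the host's FP/SIMD registers into its context, then load
	// the guest's; both helpers take the fpregs address in x0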
	ldr	x0, [x3, #VCPU_HOST_CONTEXT]
	kern_hyp_va x0
	add	x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	bl	__fpsimd_save_state

	add	x2, x3, #VCPU_CONTEXT
	add	x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	bl	__fpsimd_restore_state

	// Skip restoring fpexc32 for AArch64 guests
	mrs	x1, hcr_el2
	tbnz	x1, #HCR_RW_SHIFT, 1f
	ldr	x4, [x3, #VCPU_FPEXC32_EL2]
	msr	fpexc32_el2, x4
1:
	ldp	x4, lr, [sp], #16
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16

	eret
ENDPROC(__fpsimd_guest_restore)

ENTRY(__qcom_hyp_sanitize_btac_predictors)
	/**
	 * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700)
	 * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls
	 * b15-b0: contains SiP functionID
	 */
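	// movz/movk assemble the function ID 0xc2001700 in x0: movz sets
	// the low 16 bits and zeroes the rest, movk then inserts the high
	// halfword at bit 16 without disturbing the low bits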
	movz	x0, #0x1700
	movk	x0, #0xc200, lsl #16
	smc	#0
	ret
ENDPROC(__qcom_hyp_sanitize_btac_predictors)