/*
 * arch/arm64/kvm/hyp/hyp-entry.S
 * KVM: arm64: hypervisor (EL2) exception vectors and entry code.
 */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
17
#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

	.text
	.pushsection	.hyp.text, "ax"
30
/*
 * Call the function whose address is in x0, after shifting the argument
 * registers down so that the caller's x1-x3 become the callee's x0-x2.
 * Clobbers x0-x2 and lr (lr is preserved across the call via the stack).
 */
.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!		// 16-byte slot keeps sp aligned (AAPCS64)
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm
44
/*
 * VHE entry point: with E2H set the host kernel already runs at EL2, so a
 * "hypercall" is a plain function call into this trampoline rather than an
 * HVC exception. Arguments follow the do_el2_call convention (function in
 * x0, its parameters in x1-x3).
 */
ENTRY(__vhe_hyp_call)
	do_el2_call
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)
56
/*
 * Synchronous exception taken from EL1 (or a host HVC on non-VHE).
 * Dispatch: HVC from a guest -> el1_hvc_guest, any other guest trap ->
 * el1_trap, HVC from the host -> stub handling or an EL2 function call.
 */
el1_sync:				// Guest trapped into EL2
	stp	x0, x1, [sp, #-16]!	// free x0/x1 as scratch; el1_trap and
					// el1_hvc_guest rely on this frame

	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne	// if first cmp was eq, force
						// NZCV=#4 (Z set) so b.ne falls
						// through for either HVC EC
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f			// unsigned compare: stub hcalls are
					// the low-numbered range

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call: x0 holds the kernel VA of the function;
	 * convert it to a HYP VA before calling.
	 */
	kern_hyp_va	x0
	do_el2_call

	eret
99
el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbnz	w1, el1_trap			// not the workaround call: take
						// the normal trap path (saved
						// x0/x1 still on the stack)
	mov	x0, x1				// w1 == 0 here (cbnz fell through)
						// and the 32-bit eor zeroed the
						// upper half, so this returns
						// SMCCC success (0) to the guest
	add	sp, sp, #16			// drop the x0/x1 frame pushed
						// by el1_sync
	eret
113
/*
 * Generic guest trap: hand off to the C exit handler via __guest_exit.
 * Entered with the guest's x0/x1 saved on the stack by el1_sync (or by
 * the el1_irq/el1_error stubs, which share __guest_exit's expectations).
 */
el1_trap:
	get_vcpu_ptr	x1, x0		// x1 = vcpu pointer, x0 is scratch

	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	/*
	 * x0: ESR_EC
	 * x1: vcpu pointer
	 */

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 */
alternative_if_not ARM64_HAS_NO_FPSIMD
	cmp	x0, #ESR_ELx_EC_FP_ASIMD
	b.eq	__fpsimd_guest_restore
alternative_else_nop_endif

	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit
137
/* IRQ taken from the guest: save scratch regs and exit to the host. */
el1_irq:
	stp	x0, x1, [sp, #-16]!	// same frame layout __guest_exit expects
	get_vcpu_ptr	x1, x0		// x1 = vcpu pointer
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit
143
/* SError taken from the guest: save scratch regs and exit to the host. */
el1_error:
	stp	x0, x1, [sp, #-16]!	// same frame layout __guest_exit expects
	get_vcpu_ptr	x1, x0		// x1 = vcpu pointer
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit
149
el2_error:
	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1			// elr within [start, end]?
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne		// if already eq, force Z so b.ne
					// falls through; else compare vs end
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)	// flag the SError in
							// the exit code
	eret
171
/*
 * Fake an exception return into the kernel's panic() at EL1h, with all
 * DAIF exceptions masked, to report an unrecoverable hypervisor failure.
 */
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr		// target PSTATE for the eret
	ldr	lr, =panic
	msr	elr_el2, lr		// "return" address = panic()
	eret
ENDPROC(__hyp_do_panic)
180
/* Load the host context pointer and tail-call the C panic handler. */
ENTRY(__hyp_panic)
	get_host_ctxt	x0, x1		// x0 = host cpu context, x1 scratch
	b	hyp_panic
ENDPROC(__hyp_panic)
185
/*
 * Emit a named stub for an exception that should never occur; it simply
 * branches to \target (default: __hyp_panic). .align 2 keeps the label
 * on a 4-byte instruction boundary.
 */
.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm
192
	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_sync_invalid
	invalid_vector	el1_irq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg			// dump the literal pool (ldr =sym constants)
				// here, before the aligned vector table
206
	.align 11		// VBAR_EL2 requires 2KB-aligned vectors

/*
 * EL2 exception vector table, installed in VBAR_EL2. Standard AArch64
 * layout: four groups of four entries (sync/IRQ/FIQ/SError), for
 * current-EL-with-SP0, current-EL-with-SPx, lower-EL-AArch64 and
 * lower-EL-AArch32 respectively.
 */
ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2_error			// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error			// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error			// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)