/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
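
/*
 * These offsets index into the per-CPU context via constants generated
 * by asm-offsets.c. As a rough sketch only (see asm/kvm_host.h and
 * uapi/asm/kvm.h for the authoritative layout), the structure is:
 *
 *	struct kvm_cpu_context {
 *		struct kvm_regs gp_regs;   // user_pt_regs, EL1 sp/elr/spsr,
 *					   // FP/SIMD state
 *		u64 sys_regs[NR_SYS_REGS]; // 8 bytes per system register
 *	};
 *
 * hence the 8*x scaling in the XREG/SPSR/SYSREG accessors above.
 */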

	.text
	.pushsection	.hyp.text, "ax"
	.align	PAGE_SHIFT

.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x19, sp_el0
	mrs	x20, elr_el2		// pc before entering el2
	mrs	x21, spsr_el2		// pstate before entering el2

	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]

	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm

.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]

	msr	sp_el0, x19
	msr	elr_el2, x20		// pc on return from el2
	msr	spsr_el2, x21		// pstate on return from el2

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm
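
/*
 * Note on the XREG indices: in user_pt_regs, regs[0..30] are followed by
 * sp, pc and pstate, so CPU_XREG_OFFSET(31) is the SP_EL0 slot and the two
 * slots after it hold ELR_EL2 (pc) and SPSR_EL2 (pstate). That is why
 * save_common_regs stores those three at [x3, #96] and [x3, #112] relative
 * to x19's slot (19 + 12 registers * 8 bytes = 96).
 */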

.macro save_host_regs
	save_common_regs
.endm

.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm
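
/*
 * fpsimd_save/fpsimd_restore come from asm/fpsimdmacros.h; they transfer
 * the full FP/SIMD register file (q0-q31 plus FPSR/FPCR) to/from the
 * user_fpsimd_state at CPU_FP_REGS, using the second argument as the
 * number of a scratch register.
 */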

.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]

	pop	x6, x7			// x2, x3
	pop	x4, x5			// x0, x1

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	save_common_regs
.endm

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5			// Push x0-x3 on the stack
	push	x6, x7

	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1

	// Do not touch any register after this!
.endm
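
/*
 * The push/pop pairing above relies on the exception vectors: el1_sync
 * and el1_irq stash the guest's x0-x3 on the hyp stack on entry, so
 * restore_guest_regs pushes the values to be reloaded and then pops them
 * in the opposite order (x2/x3 first, then x0/x1) as the very last step
 * before eret.
 */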

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
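/*
 * For reference, the order implied by the mrs/msr sequences below
 * (and which the sysreg enum in kvm_asm.h must match) is:
 *
 *	MPIDR_EL1, CSSELR_EL1, SCTLR_EL1, ACTLR_EL1, CPACR_EL1,
 *	TTBR0_EL1, TTBR1_EL1, TCR_EL1, ESR_EL1, AFSR0_EL1, AFSR1_EL1,
 *	FAR_EL1, MAIR_EL1, VBAR_EL1, CONTEXTIDR_EL1, TPIDR_EL0,
 *	TPIDRRO_EL0, TPIDR_EL1, AMAIR_EL1, CNTKCTL_EL1, PAR_EL1,
 *	MDSCR_EL1
 */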
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x4, vmpidr_el2
	mrs	x5, csselr_el1
	mrs	x6, sctlr_el1
	mrs	x7, actlr_el1
	mrs	x8, cpacr_el1
	mrs	x9, ttbr0_el1
	mrs	x10, ttbr1_el1
	mrs	x11, tcr_el1
	mrs	x12, esr_el1
	mrs	x13, afsr0_el1
	mrs	x14, afsr1_el1
	mrs	x15, far_el1
	mrs	x16, mair_el1
	mrs	x17, vbar_el1
	mrs	x18, contextidr_el1
	mrs	x19, tpidr_el0
	mrs	x20, tpidrro_el0
	mrs	x21, tpidr_el1
	mrs	x22, amair_el1
	mrs	x23, cntkctl_el1
	mrs	x24, par_el1
	mrs	x25, mdscr_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	stp	x24, x25, [x3, #160]
.endm

.macro save_debug type
	// x4: pointer to register set
	// x5: number of registers to skip
	// x6..x22 trashed

	adr	x22, 1f
	add	x22, x22, x5, lsl #2
	br	x22
1:
	mrs	x21, \type\()15_el1
	mrs	x20, \type\()14_el1
	mrs	x19, \type\()13_el1
	mrs	x18, \type\()12_el1
	mrs	x17, \type\()11_el1
	mrs	x16, \type\()10_el1
	mrs	x15, \type\()9_el1
	mrs	x14, \type\()8_el1
	mrs	x13, \type\()7_el1
	mrs	x12, \type\()6_el1
	mrs	x11, \type\()5_el1
	mrs	x10, \type\()4_el1
	mrs	x9, \type\()3_el1
	mrs	x8, \type\()2_el1
	mrs	x7, \type\()1_el1
	mrs	x6, \type\()0_el1

	adr	x22, 1f
	add	x22, x22, x5, lsl #2
	br	x22
1:
	str	x21, [x4, #(15 * 8)]
	str	x20, [x4, #(14 * 8)]
	str	x19, [x4, #(13 * 8)]
	str	x18, [x4, #(12 * 8)]
	str	x17, [x4, #(11 * 8)]
	str	x16, [x4, #(10 * 8)]
	str	x15, [x4, #(9 * 8)]
	str	x14, [x4, #(8 * 8)]
	str	x13, [x4, #(7 * 8)]
	str	x12, [x4, #(6 * 8)]
	str	x11, [x4, #(5 * 8)]
	str	x10, [x4, #(4 * 8)]
	str	x9, [x4, #(3 * 8)]
	str	x8, [x4, #(2 * 8)]
	str	x7, [x4, #(1 * 8)]
	str	x6, [x4, #(0 * 8)]
.endm
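
/*
 * The adr/add/br pairs above implement a small computed goto: every
 * mrs/str in the descending sequence is exactly 4 bytes long, so
 * branching (x5 << 2) bytes past the 1: label skips the top x5
 * breakpoint/watchpoint registers that this CPU does not implement,
 * touching only \type\()0 .. \type\()(15 - x5).
 */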

.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldp	x24, x25, [x3, #160]

	msr	vmpidr_el2, x4
	msr	csselr_el1, x5
	msr	sctlr_el1, x6
	msr	actlr_el1, x7
	msr	cpacr_el1, x8
	msr	ttbr0_el1, x9
	msr	ttbr1_el1, x10
	msr	tcr_el1, x11
	msr	esr_el1, x12
	msr	afsr0_el1, x13
	msr	afsr1_el1, x14
	msr	far_el1, x15
	msr	mair_el1, x16
	msr	vbar_el1, x17
	msr	contextidr_el1, x18
	msr	tpidr_el0, x19
	msr	tpidrro_el0, x20
	msr	tpidr_el1, x21
	msr	amair_el1, x22
	msr	cntkctl_el1, x23
	msr	par_el1, x24
	msr	mdscr_el1, x25
.endm

.macro restore_debug type
	// x4: pointer to register set
	// x5: number of registers to skip
	// x6..x22 trashed

	adr	x22, 1f
	add	x22, x22, x5, lsl #2
	br	x22
1:
	ldr	x21, [x4, #(15 * 8)]
	ldr	x20, [x4, #(14 * 8)]
	ldr	x19, [x4, #(13 * 8)]
	ldr	x18, [x4, #(12 * 8)]
	ldr	x17, [x4, #(11 * 8)]
	ldr	x16, [x4, #(10 * 8)]
	ldr	x15, [x4, #(9 * 8)]
	ldr	x14, [x4, #(8 * 8)]
	ldr	x13, [x4, #(7 * 8)]
	ldr	x12, [x4, #(6 * 8)]
	ldr	x11, [x4, #(5 * 8)]
	ldr	x10, [x4, #(4 * 8)]
	ldr	x9, [x4, #(3 * 8)]
	ldr	x8, [x4, #(2 * 8)]
	ldr	x7, [x4, #(1 * 8)]
	ldr	x6, [x4, #(0 * 8)]

	adr	x22, 1f
	add	x22, x22, x5, lsl #2
	br	x22
1:
	msr	\type\()15_el1, x21
	msr	\type\()14_el1, x20
	msr	\type\()13_el1, x19
	msr	\type\()12_el1, x18
	msr	\type\()11_el1, x17
	msr	\type\()10_el1, x16
	msr	\type\()9_el1, x15
	msr	\type\()8_el1, x14
	msr	\type\()7_el1, x13
	msr	\type\()6_el1, x12
	msr	\type\()5_el1, x11
	msr	\type\()4_el1, x10
	msr	\type\()3_el1, x9
	msr	\type\()2_el1, x8
	msr	\type\()1_el1, x7
	msr	\type\()0_el1, x6
.endm

.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.endm

.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
.endm

.macro skip_debug_state tmp, target
	ldr	\tmp, [x0, #VCPU_DEBUG_FLAGS]
	tbz	\tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
.endm

/*
 * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled)
 */
.macro skip_fpsimd_state tmp, target
	mrs	\tmp, cptr_el2
	tbnz	\tmp, #CPTR_EL2_TFP_SHIFT, \target
.endm
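
/*
 * skip_fpsimd_state is the heart of the lazy FP/SIMD switch: activate_traps
 * leaves CPTR_EL2.TFP set, so FP state is only saved/restored once the
 * guest actually touches a VFP/SIMD register and traps to
 * switch_to_guest_fpsimd below, which clears TFP and performs the switch.
 */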

.macro compute_debug_state target
	// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
	// is set, we do a full save/restore cycle and disable trapping.
	add	x25, x0, #VCPU_CONTEXT

	// Check the state of MDSCR_EL1
	ldr	x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
	and	x26, x25, #DBG_MDSCR_KDE
	and	x25, x25, #DBG_MDSCR_MDE
	adds	xzr, x25, x26
	b.eq	9998f		// Nothing to see there

	// If any interesting bits were set, we must set the flag
	mov	x26, #KVM_ARM64_DEBUG_DIRTY
	str	x26, [x0, #VCPU_DEBUG_FLAGS]
	b	9999f		// Don't skip restore

9998:
	// Otherwise load the flags from memory in case we recently
	// trapped
	skip_debug_state x25, \target
9999:
.endm

.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	mrs	x4, spsr_abt
	mrs	x5, spsr_und
	mrs	x6, spsr_irq
	mrs	x7, spsr_fiq
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	mrs	x4, dacr32_el2
	mrs	x5, ifsr32_el2
	stp	x4, x5, [x3]

	skip_fpsimd_state x8, 2f
	mrs	x6, fpexc32_el2
	str	x6, [x3, #16]
2:
	skip_debug_state x8, 1f
	mrs	x7, dbgvcr32_el2
	str	x7, [x3, #24]
1:
.endm

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	spsr_abt, x4
	msr	spsr_und, x5
	msr	spsr_irq, x6
	msr	spsr_fiq, x7

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp	x4, x5, [x3]
	msr	dacr32_el2, x4
	msr	ifsr32_el2, x5

	skip_debug_state x8, 1f
	ldr	x7, [x3, #24]
	msr	dbgvcr32_el2, x7
1:
.endm

.macro activate_traps
	ldr	x2, [x0, #VCPU_HCR_EL2]

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 */
	tbnz	x2, #HCR_RW_SHIFT, 99f	// open code skip_32bit_state
	mov	x3, #(1 << 30)
	msr	fpexc32_el2, x3
	isb
99:
	msr	hcr_el2, x2
	mov	x2, #CPTR_EL2_TTA
	orr	x2, x2, #CPTR_EL2_TFP
	msr	cptr_el2, x2

	mov	x2, #(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	// Monitor Debug Config - see kvm_arm_setup_debug()
	ldr	x2, [x0, #VCPU_MDCR_EL2]
	msr	mdcr_el2, x2
.endm

.macro deactivate_traps
	mov	x2, #HCR_RW
	msr	hcr_el2, x2
	msr	hstr_el2, xzr

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va	x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm

.macro deactivate_vm
	msr	vttbr_el2, xzr
.endm

/*
 * Call into the vgic backend for state saving
 */
.macro save_vgic_state
alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
	bl	__save_vgic_v2_state
alternative_else
	bl	__save_vgic_v3_state
alternative_endif
	mrs	x24, hcr_el2
	mov	x25, #HCR_INT_OVERRIDE
	neg	x25, x25
	and	x24, x24, x25
	msr	hcr_el2, x24
.endm
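
/*
 * The alternative_* markers are patched at boot by the cpufeature
 * framework: on CPUs with a system-register GIC CPU interface
 * (ARM64_HAS_SYSREG_GIC_CPUIF, i.e. GICv3) the v2 call is replaced by
 * the v3 one, so the backend selection costs nothing at run time.
 */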

/*
 * Call into the vgic backend for state restoring
 */
.macro restore_vgic_state
	mrs	x24, hcr_el2
	ldr	x25, [x0, #VCPU_IRQ_LINES]
	orr	x24, x24, #HCR_INT_OVERRIDE
	orr	x24, x24, x25
	msr	hcr_el2, x24
alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
	bl	__restore_vgic_v2_state
alternative_else
	bl	__restore_vgic_v3_state
alternative_endif
.endm

.macro save_timer_state
	// x0: vcpu pointer
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	mrs	x3, cntv_ctl_el0
	and	x3, x3, #3
	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]

	isb

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

1:
	// Disable the virtual timer
	msr	cntv_ctl_el0, xzr

	// Allow physical timer/counter access for the host
	mrs	x2, cnthctl_el2
	orr	x2, x2, #3
	msr	cnthctl_el2, x2

	// Clear cntvoff for the host
	msr	cntvoff_el2, xzr
.endm

.macro restore_timer_state
	// x0: vcpu pointer
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
	mrs	x2, cnthctl_el2
	orr	x2, x2, #1
	bic	x2, x2, #2
	msr	cnthctl_el2, x2

	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	msr	cntvoff_el2, x3
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2
	isb

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
	and	x2, x2, #3
	msr	cntv_ctl_el0, x2
1:
.endm
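
/*
 * The CNTHCTL_EL2 arithmetic above: bit 0 is EL1PCTEN (EL1 physical
 * counter access) and bit 1 is EL1PCEN (EL1 physical timer access).
 * "orr #3" grants the host both; "orr #1 / bic #2" lets the guest read
 * the physical counter while trapping its physical timer accesses.
 */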

__save_sysregs:
	save_sysregs
	ret

__restore_sysregs:
	restore_sysregs
	ret

/* Save debug state */
__save_debug:
	// x2: ptr to CPU context
	// x3: ptr to debug reg struct
	// x4/x5/x6-22/x24-26: trashed

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip

	mov	x5, x24
	add	x4, x3, #DEBUG_BCR
	save_debug dbgbcr
	add	x4, x3, #DEBUG_BVR
	save_debug dbgbvr

	mov	x5, x25
	add	x4, x3, #DEBUG_WCR
	save_debug dbgwcr
	add	x4, x3, #DEBUG_WVR
	save_debug dbgwvr

	mrs	x21, mdccint_el1
	str	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
	ret

/* Restore debug state */
__restore_debug:
	// x2: ptr to CPU context
	// x3: ptr to debug reg struct
	// x4/x5/x6-22/x24-26: trashed

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip

	mov	x5, x24
	add	x4, x3, #DEBUG_BCR
	restore_debug dbgbcr
	add	x4, x3, #DEBUG_BVR
	restore_debug dbgbvr

	mov	x5, x25
	add	x4, x3, #DEBUG_WCR
	restore_debug dbgwcr
	add	x4, x3, #DEBUG_WVR
	restore_debug dbgwvr

	ldr	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
	msr	mdccint_el1, x21

	ret

__save_fpsimd:
	skip_fpsimd_state x3, 1f
	save_fpsimd
1:	ret

__restore_fpsimd:
	skip_fpsimd_state x3, 1f
	restore_fpsimd
1:	ret

switch_to_guest_fpsimd:
	push	x4, lr

	mrs	x2, cptr_el2
	bic	x2, x2, #CPTR_EL2_TFP
	msr	cptr_el2, x2
	isb

	mrs	x0, tpidr_el2

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2
	bl __save_fpsimd

	add	x2, x0, #VCPU_CONTEXT
	bl __restore_fpsimd

	skip_32bit_state x3, 1f
	ldr	x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
	msr	fpexc32_el2, x4
1:
	pop	x4, lr
	pop	x2, x3
	pop	x0, x1

	eret

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
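/*
 * Reached via kvm_call_hyp() from the kernel's run loop; roughly (a
 * sketch of the caller in arch/arm/kvm/arm.c, not verbatim):
 *
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * so it is the host HVC path of el1_sync below that actually branches
 * here with x0 = vcpu.
 */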
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl __save_sysregs

	compute_debug_state 1f
	add	x3, x0, #VCPU_HOST_DEBUG_STATE
	bl	__save_debug
1:
	activate_traps
	activate_vm

	restore_vgic_state
	restore_timer_state

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	// We must restore the 32-bit state before the sysregs, thanks
	// to Cortex-A57 erratum #852523.
	restore_guest_32bit_state
	bl __restore_sysregs

	skip_debug_state x3, 1f
	ldr	x3, [x0, #VCPU_DEBUG_PTR]
	kern_hyp_va x3
	bl	__restore_debug
1:
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl __save_fpsimd
	bl __save_sysregs

	skip_debug_state x3, 1f
	ldr	x3, [x0, #VCPU_DEBUG_PTR]
	kern_hyp_va x3
	bl	__save_debug
1:
	save_guest_32bit_state

	save_timer_state
	save_vgic_state

	deactivate_traps
	deactivate_vm

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs
	bl __restore_fpsimd
	/* Clear FPSIMD and Trace trapping */
	msr	cptr_el2, xzr

	skip_debug_state x3, 1f
	// Clear the dirty flag for the next run, as all the state has
	// already been saved. Note that we nuke the whole 64bit word.
	// If we ever add more flags, we'll have to be more careful...
	str	xzr, [x0, #VCPU_DEBUG_FLAGS]
	add	x3, x0, #VCPU_HOST_DEBUG_STATE
	bl	__restore_debug
1:
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	dsb	ishst

	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	lsr	x1, x1, #12
	tlbi	ipas2e1is, x1
	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb	ish
	tlbi	vmalle1is
	dsb	ish
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)

/**
 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
 * @kvm:	pointer to kvm structure
 *
 * Invalidates all Stage 1 and 2 TLB entries for the current VMID.
 */
ENTRY(__kvm_tlb_flush_vmid)
	dsb	ishst

	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	tlbi	vmalls12e1is
	dsb	ish
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid)

ENTRY(__kvm_flush_vm_context)
	dsb	ishst
	tlbi	alle1is
	ic	ialluis
	dsb	ish
	ret
ENDPROC(__kvm_flush_vm_context)

__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.
	mrs	x0, vttbr_el2
	cbz	x0, 1f

	mrs	x0, tpidr_el2

	deactivate_traps
	deactivate_vm

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs

	/*
	 * Make sure we have a valid host stack, and don't leave junk in the
	 * frame pointer that will give us a misleading host stack unwinding.
	 */
	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	msr	sp_el1, x22
	mov	x29, xzr

1:	adr	x0, __hyp_panic_str
	adr	x1, 2f
	ldp	x2, x3, [x1]
	sub	x0, x0, x2
	add	x0, x0, x3
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
	mrs	x6, par_el1
	mrs	x7, tpidr_el2

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
	.quad	PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

	.align	2

/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C-way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed). The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
 * passed in x0 and x1.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 */
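/*
 * Typical uses from the kernel side (a sketch, assuming the usual
 * callers in arm.c and mmu.c):
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * The HVC lands in el1_sync below, which recognises the host context
 * (VTTBR_EL2 is clear) and dispatches to the requested function at its
 * HYP VA.
 */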
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)

.macro invalid_vector	label, target
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:				// Guest trapped into EL2
	push	x0, x1
	push	x2, x3

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_ELx_EC_SHIFT

	cmp	x2, #ESR_ELx_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2		// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop	x2, x3
	pop	x0, x1

	/* Check for __hyp_get_vectors */
	cbnz	x0, 1f
	mrs	x0, vbar_el2
	b	2f

1:	push	lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va	x0
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr

	pop	lr, xzr
2:	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */

	/* Guest accessed VFP/SIMD registers, save host, restore Guest */
	cmp	x2, #ESR_ELx_EC_FP_ASIMD
	b.eq	switch_to_guest_fpsimd

	cmp	x2, #ESR_ELx_EC_DABT_LOW
	mov	x0, #ESR_ELx_EC_IABT_LOW
	ccmp	x2, x0, #4, ne
	b.ne	1f			// Not an abort we care about

	/* This is an abort. Check for permission fault */
alternative_if_not ARM64_WORKAROUND_834220
	and	x2, x1, #ESR_ELx_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f			// Not a permission fault
alternative_else
	nop				// Use the permission fault path to
	nop				// check for a valid S1 translation,
	nop				// regardless of the ESR value.
alternative_endif

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f		// S1PTW is set

	/* Preserve PAR_EL1 */
	mrs	x3, par_el1
	push	x3, xzr

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
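	/*
	 * Worked example of the AT-based fallback: for a faulting guest
	 * VA whose IPA is 0x8123456000, the AT S1E1R walk below leaves
	 * PA[47:12] in PAR_EL1; ubfx extracts the page number 0x8123456,
	 * and lsl #4 places it in the FIPA field, giving 0x81234560 --
	 * the same format HPFAR_EL2 would have reported.
	 */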
	mrs	x2, far_el2
	at	s1e1r, x2
	isb

	/* Read result */
	mrs	x3, par_el1
	pop	x0, xzr			// Restore PAR_EL1 from the stack
	msr	par_el1, x0
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	w1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1

	eret

el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return

	.ltorg

	.align 11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid	// Synchronous EL2t
	ventry	el2t_irq_invalid	// IRQ EL2t
	ventry	el2t_fiq_invalid	// FIQ EL2t
	ventry	el2t_error_invalid	// Error EL2t

	ventry	el2h_sync_invalid	// Synchronous EL2h
	ventry	el2h_irq_invalid	// IRQ EL2h
	ventry	el2h_fiq_invalid	// FIQ EL2h
	ventry	el2h_error_invalid	// Error EL2h

	ventry	el1_sync		// Synchronous 64-bit EL1
	ventry	el1_irq			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
	ventry	el1_error_invalid	// Error 64-bit EL1

	ventry	el1_sync		// Synchronous 32-bit EL1
	ventry	el1_irq			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
	ventry	el1_error_invalid	// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)


ENTRY(__kvm_get_mdcr_el2)
	mrs	x0, mdcr_el2
	ret
ENDPROC(__kvm_get_mdcr_el2)

	.popsection