/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

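/* Helpers for dealing with the banked state of an AArch32 guest */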
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

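/* Exception injection into the guest */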
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

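/* With HCR_EL2.RW clear, the guest's EL1 executes in AArch32 state */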
static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

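/*
 * Compute the initial HCR_EL2 value for this vcpu: start from the
 * default guest flags and adjust for VHE, RAS, stage-2 FWB, the
 * guest's register width and the ID registers we want to trap.
 */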
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

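/* Direct access to the vcpu's shadow copy of HCR_EL2 */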
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

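/* Toggle trapping of guest WFE instructions (HCR_EL2.TWE) */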
static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
}

static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
}

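/*
 * Pointer authentication: with HCR_(API|APK) clear, guest use of
 * ptrauth instructions and key registers traps to EL2.
 */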
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

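/* Start with ptrauth trapped, so the keys are only loaded if the guest uses it */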
static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_ptrauth(vcpu))
		vcpu_ptrauth_disable(vcpu);
}

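/* VSESR_EL2 provides the syndrome reported by an injected virtual SError */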
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

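/*
 * ELR_EL1 may be resident in hardware (sysregs loaded on the CPU) or
 * in the in-memory context; these accessors pick the right copy.
 */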
static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(elr);
	else
		return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, elr);
	else
		*__vcpu_elr_el1(vcpu) = v;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

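/* AArch64 traps carry no condition code; only AArch32 ones need checking */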
static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}

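/*
 * SPSR accessors: handle the AArch32 banked SPSRs as well as the
 * live-on-CPU vs in-memory split.
 */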
static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(spsr);
	else
		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, spsr);
	else
		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

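/* Return true unless the guest was executing in EL0/User mode */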
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

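/*
 * Accessors for the fault state snapshotted on guest exit
 * (ESR_EL2, FAR_EL2, HPFAR_EL2, DISR_EL1).
 */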
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

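/* Condition code of the trapped instruction, or -1 if ESR_ELx.CV is clear */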
static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

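/* HPFAR_EL2.FIPA holds bits [47:12] of the faulting IPA; shift them back into place */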
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

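/* Decoding of the data/instruction abort syndrome encoded in ESR_EL2 */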
static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

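/*
 * True for synchronous external aborts, whether taken on the access
 * itself, on a translation table walk, or reported as a parity/ECC
 * error.
 */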
static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

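/* Make the guest big-endian: CPSR.E for AArch32, SCTLR_EL1.EE (bit 25) for AArch64 */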
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= (1 << 25);
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

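/*
 * MMIO data is handled LE by the host; byte-swap it if the guest is
 * big-endian so that it sees the value it expects.
 */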
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}

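/*
 * Advance the guest PC over an emulated instruction, keeping the
 * single-step state machine in sync.
 */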
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;

	/* advance the singlestep state machine */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);
	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}

#endif /* __ARM64_KVM_EMULATE_H__ */