/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}
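
/*
 * Illustrative sketch, not part of the original header: code that needs to
 * adjust trap behaviour after vcpu_reset_hcr() is expected to go through
 * vcpu_hcr() rather than poking HCR_EL2 directly, for instance to pend a
 * virtual SError (whether a given KVM version does exactly this is an
 * assumption; the point is the access pattern):
 *
 *	vcpu_set_vsesr(vcpu, esr);
 *	*vcpu_hcr(vcpu) |= HCR_VSE;
 */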

static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
}

static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(elr);
	else
		return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, elr);
	else
		*__vcpu_elr_el1(vcpu) = v;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	/* register 31 is the zero register; writes to it are discarded */
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
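
/*
 * Illustrative sketch, not part of the original header: a typical emulation
 * path decodes the transfer register from the ESR_EL2 syndrome and only then
 * touches the GP register file, e.g. when handling a trapped data abort:
 *
 *	int rd = kvm_vcpu_dabt_get_rd(vcpu);		// Rt field from ESR_EL2
 *	unsigned long val = vcpu_get_reg(vcpu, rd);	// guest value for a store
 *	...
 *	vcpu_set_reg(vcpu, rd, data);			// result for a load
 *
 * Register numbers obtained any other way may select the wrong banked
 * register when the guest runs in AArch32 mode.
 */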

static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(spsr);
	else
		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, spsr);
	else
		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
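
/*
 * Illustrative sketch, not part of the original header: HPFAR_EL2 only
 * carries the page-frame part of the faulting IPA, so an abort handler is
 * expected to merge the page offset back in from the faulting virtual
 * address, roughly (the exact mask is up to the caller):
 *
 *	phys_addr_t fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
 *	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1UL << 12) - 1);
 */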

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
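
/*
 * Illustrative sketch, not part of the original header: the accessors above
 * are typically combined to decode a trapped MMIO access, along these lines
 * (the error handling is made up for illustration):
 *
 *	if (!kvm_vcpu_dabt_isvalid(vcpu))
 *		return -ENOSYS;				// no usable syndrome
 *	bool is_write = kvm_vcpu_dabt_iswrite(vcpu);	// direction
 *	unsigned int len = kvm_vcpu_dabt_get_as(vcpu);	// access size in bytes
 *	int rd = kvm_vcpu_dabt_get_rd(vcpu);		// register to use
 */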

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	/* external abort and parity/ECC error fault status codes */
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= (1 << 25);
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}
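
/*
 * Illustrative sketch, not part of the original header: when an emulated MMIO
 * read completes, the host-endian value is converted to the guest's current
 * data endianness before it lands in the destination register, roughly:
 *
 *	unsigned long data = vcpu_data_host_to_guest(vcpu, host_val, len);
 *	vcpu_set_reg(vcpu, rd, data);
 *
 * host_val, len and rd stand for values produced by the MMIO decode; the
 * guest_to_host variant is used in the opposite direction for writes.
 */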

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;

	/* advance the singlestep state machine */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}
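
/*
 * Illustrative sketch, not part of the original header: emulation handlers
 * usually retire the trapped instruction once emulation has succeeded, using
 * the ESR_EL2.IL bit to pick the 16- or 32-bit encoding width:
 *
 *	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 */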

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);
	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}

#endif /* __ARM64_KVM_EMULATE_H__ */