]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - arch/arm64/include/asm/kvm_emulate.h
c3baa971edab561abf976349ef6dc5c57fa5f96e
[mirror_ubuntu-artful-kernel.git] / arch / arm64 / include / asm / kvm_emulate.h
1 /*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/include/kvm_emulate.h
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22 #ifndef __ARM64_KVM_EMULATE_H__
23 #define __ARM64_KVM_EMULATE_H__
24
25 #include <linux/kvm_host.h>
26
27 #include <asm/esr.h>
28 #include <asm/kvm_arm.h>
29 #include <asm/kvm_asm.h>
30 #include <asm/kvm_mmio.h>
31 #include <asm/ptrace.h>
32 #include <asm/cputype.h>
33
34 unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
35 unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
36
37 bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
38 void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
39
40 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
41 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
42 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
43
44 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
45 {
46 vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
47 }
48
49 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
50 {
51 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
52 }
53
54 static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
55 {
56 return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
57 }
58
59 static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
60 {
61 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
62 }
63
64 static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
65 {
66 return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
67 }
68
69 static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
70 {
71 if (vcpu_mode_is_32bit(vcpu))
72 return kvm_condition_valid32(vcpu);
73
74 return true;
75 }
76
77 static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
78 {
79 if (vcpu_mode_is_32bit(vcpu))
80 kvm_skip_instr32(vcpu, is_wide_instr);
81 else
82 *vcpu_pc(vcpu) += 4;
83 }
84
85 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
86 {
87 *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
88 }
89
90 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
91 {
92 if (vcpu_mode_is_32bit(vcpu))
93 return vcpu_reg32(vcpu, reg_num);
94
95 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
96 }
97
98 /* Get vcpu SPSR for current mode */
99 static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
100 {
101 if (vcpu_mode_is_32bit(vcpu))
102 return vcpu_spsr32(vcpu);
103
104 return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
105 }
106
107 static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
108 {
109 u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
110
111 if (vcpu_mode_is_32bit(vcpu))
112 return mode > COMPAT_PSR_MODE_USR;
113
114 return mode != PSR_MODE_EL0t;
115 }
116
117 static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
118 {
119 return vcpu->arch.fault.esr_el2;
120 }
121
122 static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
123 {
124 return vcpu->arch.fault.far_el2;
125 }
126
127 static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
128 {
129 return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
130 }
131
132 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
133 {
134 return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
135 }
136
137 static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
138 {
139 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
140 }
141
142 static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
143 {
144 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
145 }
146
147 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
148 {
149 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
150 }
151
152 static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
153 {
154 return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
155 }
156
157 static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
158 {
159 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
160 }
161
162 static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
163 {
164 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
165 }
166
167 static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
168 {
169 return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
170 }
171
172 /* This one is not specific to Data Abort */
173 static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
174 {
175 return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
176 }
177
178 static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
179 {
180 return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
181 }
182
183 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
184 {
185 return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
186 }
187
188 static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
189 {
190 return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
191 }
192
193 static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
194 {
195 return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
196 }
197
198 static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
199 {
200 return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
201 }
202
203 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
204 {
205 if (vcpu_mode_is_32bit(vcpu))
206 *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
207 else
208 vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
209 }
210
211 static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
212 {
213 if (vcpu_mode_is_32bit(vcpu))
214 return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);
215
216 return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
217 }
218
219 static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
220 unsigned long data,
221 unsigned int len)
222 {
223 if (kvm_vcpu_is_be(vcpu)) {
224 switch (len) {
225 case 1:
226 return data & 0xff;
227 case 2:
228 return be16_to_cpu(data & 0xffff);
229 case 4:
230 return be32_to_cpu(data & 0xffffffff);
231 default:
232 return be64_to_cpu(data);
233 }
234 } else {
235 switch (len) {
236 case 1:
237 return data & 0xff;
238 case 2:
239 return le16_to_cpu(data & 0xffff);
240 case 4:
241 return le32_to_cpu(data & 0xffffffff);
242 default:
243 return le64_to_cpu(data);
244 }
245 }
246
247 return data; /* Leave LE untouched */
248 }
249
250 static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
251 unsigned long data,
252 unsigned int len)
253 {
254 if (kvm_vcpu_is_be(vcpu)) {
255 switch (len) {
256 case 1:
257 return data & 0xff;
258 case 2:
259 return cpu_to_be16(data & 0xffff);
260 case 4:
261 return cpu_to_be32(data & 0xffffffff);
262 default:
263 return cpu_to_be64(data);
264 }
265 } else {
266 switch (len) {
267 case 1:
268 return data & 0xff;
269 case 2:
270 return cpu_to_le16(data & 0xffff);
271 case 4:
272 return cpu_to_le32(data & 0xffffffff);
273 default:
274 return cpu_to_le64(data);
275 }
276 }
277
278 return data; /* Leave LE untouched */
279 }
280
281 #endif /* __ARM64_KVM_EMULATE_H__ */