/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputype.h>
#include <asm/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { NULL }
};

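/*
 * Basic one-time vcpu setup: start the guest with the default set of
 * HCR_EL2 flags (HCR_GUEST_FLAGS, defined in asm/kvm_arm.h).
 */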
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
        return 0;
}

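/*
 * A core register ID is KVM_REG_ARM64 | KVM_REG_SIZE_* | KVM_REG_ARM_CORE
 * plus an offset; masking the first three fields off leaves the offset
 * into struct kvm_regs, counted in 32-bit words.
 */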
static u64 core_reg_offset_from_id(u64 id)
{
        return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /*
         * Because the kvm_regs structure is a mix of 32, 64 and
         * 128bit fields, we index it as if it was a 32bit
         * array. Hence below, nr_regs is the number of entries, and
         * off the index in the "array".
         */
        __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
        struct kvm_regs *regs = vcpu_gp_regs(vcpu);
        int nr_regs = sizeof(*regs) / sizeof(__u32);
        u32 off;

        /* Our ID is an index into the kvm_regs struct. */
        off = core_reg_offset_from_id(reg->id);
        /* The register must lie entirely within kvm_regs. */
        if (off >= nr_regs ||
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) > nr_regs)
                return -ENOENT;

        if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

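/*
 * set_core_reg() is the write-side counterpart of get_core_reg(). On top
 * of the bounds checking it must validate a PSTATE write: only the
 * AArch32 USR/FIQ/IRQ/SVC/ABT/UND modes and the AArch64 EL0t/EL1t/EL1h
 * modes are acceptable values for the guest.
 */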
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
        struct kvm_regs *regs = vcpu_gp_regs(vcpu);
        int nr_regs = sizeof(*regs) / sizeof(__u32);
        __uint128_t tmp;
        void *valp = &tmp;
        u64 off;
        int err = 0;

        /* Our ID is an index into the kvm_regs struct. */
        off = core_reg_offset_from_id(reg->id);
        /* The register must lie entirely within kvm_regs. */
        if (off >= nr_regs ||
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) > nr_regs)
                return -ENOENT;

        if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
                err = -EFAULT;
                goto out;
        }

        if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
                u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
                switch (mode) {
                case COMPAT_PSR_MODE_USR:
                case COMPAT_PSR_MODE_FIQ:
                case COMPAT_PSR_MODE_IRQ:
                case COMPAT_PSR_MODE_SVC:
                case COMPAT_PSR_MODE_ABT:
                case COMPAT_PSR_MODE_UND:
                case PSR_MODE_EL0t:
                case PSR_MODE_EL1t:
                case PSR_MODE_EL1h:
                        break;
                default:
                        err = -EINVAL;
                        goto out;
                }
        }

        memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
        return err;
}

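/*
 * The legacy KVM_GET_REGS/KVM_SET_REGS ioctls are not supported on arm64;
 * userspace accesses registers individually via KVM_GET/SET_ONE_REG.
 */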
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}

static unsigned long num_core_regs(void)
{
        return sizeof(struct kvm_regs) / sizeof(__u32);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
        switch (index) {
        case KVM_REG_ARM_TIMER_CTL:
        case KVM_REG_ARM_TIMER_CNT:
        case KVM_REG_ARM_TIMER_CVAL:
                return true;
        }
        return false;
}

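/*
 * copy_timer_indices() appends the three architected timer register IDs
 * to the userspace index array built for KVM_GET_REG_LIST.
 */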
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
                return -EFAULT;
        uindices++;
        if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
                return -EFAULT;
        uindices++;
        if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
                return -EFAULT;

        return 0;
}

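/*
 * Timer register accesses only handle the userspace copy here; the
 * actual state lives behind kvm_arm_timer_set_reg()/kvm_arm_timer_get_reg().
 */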
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;

        /* copy_from_user() returns the number of bytes left, not an errno. */
        if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;

        val = kvm_arm_timer_get_reg(vcpu, reg->id);
        /* Likewise, turn a failed copy_to_user() into -EFAULT. */
        return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
        return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
                + NUM_TIMER_REGS;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int i;
        const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
        int ret;

        for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
                if (put_user(core_reg | i, uindices))
                        return -EFAULT;
                uindices++;
        }

        ret = copy_timer_indices(vcpu, uindices);
        if (ret)
                return ret;
        uindices += NUM_TIMER_REGS;

        return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

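/*
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG dispatch: a register ID is routed to
 * the core register, timer register or system register accessors based
 * on its coprocessor/group field.
 */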
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /* We currently use nothing arch-specific in upper 32 bits */
        if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
                return -EINVAL;

        /* Register group 16 means we want a core register. */
        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
                return get_core_reg(vcpu, reg);

        if (is_timer_reg(reg->id))
                return get_timer_reg(vcpu, reg);

        return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        /* We currently use nothing arch-specific in upper 32 bits */
        if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
                return -EINVAL;

        /* Register group 16 means we set a core register. */
        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
                return set_core_reg(vcpu, reg);

        if (is_timer_reg(reg->id))
                return set_timer_reg(vcpu, reg);

        return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

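/*
 * kvm_target_cpu - map the host MIDR implementor/part number to a
 * KVM_ARM_TARGET_* value, or -EINVAL if the host CPU is not a supported
 * target.
 */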
int __attribute_const__ kvm_target_cpu(void)
{
        unsigned long implementor = read_cpuid_implementor();
        unsigned long part_number = read_cpuid_part_number();

        switch (implementor) {
        case ARM_CPU_IMP_ARM:
                switch (part_number) {
                case ARM_CPU_PART_AEM_V8:
                        return KVM_ARM_TARGET_AEM_V8;
                case ARM_CPU_PART_FOUNDATION:
                        return KVM_ARM_TARGET_FOUNDATION_V8;
                case ARM_CPU_PART_CORTEX_A53:
                        return KVM_ARM_TARGET_CORTEX_A53;
                case ARM_CPU_PART_CORTEX_A57:
                        return KVM_ARM_TARGET_CORTEX_A57;
                }
                break;
        case ARM_CPU_IMP_APM:
                switch (part_number) {
                case APM_CPU_PART_POTENZA:
                        return KVM_ARM_TARGET_XGENE_POTENZA;
                }
                break;
        }

        return -EINVAL;
}

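/*
 * kvm_vcpu_set_target - handle KVM_ARM_VCPU_INIT: the requested target
 * must match the host CPU; the requested feature bits are then copied
 * into vcpu->arch.features and the vcpu is reset.
 */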
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
                        const struct kvm_vcpu_init *init)
{
        unsigned int i;
        int phys_target = kvm_target_cpu();

        if (init->target != phys_target)
                return -EINVAL;

        vcpu->arch.target = phys_target;
        bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

        /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
        for (i = 0; i < sizeof(init->features) * 8; i++) {
                if (init->features[i / 32] & (1 << (i % 32))) {
                        if (i >= KVM_VCPU_MAX_FEATURES)
                                return -ENOENT;
                        set_bit(i, vcpu->arch.features);
                }
        }

        /* Now we know what it is, we can reset it. */
        return kvm_reset_vcpu(vcpu);
}

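/*
 * kvm_vcpu_preferred_target - back-end of the KVM_ARM_PREFERRED_TARGET
 * ioctl: report the vcpu target best suited to the host CPU.
 */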
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
        int target = kvm_target_cpu();

        if (target < 0)
                return -ENODEV;

        memset(init, 0, sizeof(*init));

        /*
         * For now, we don't return any features.
         * In future, we might use features to return target
         * specific features available for the preferred
         * target type.
         */
        init->target = (__u32)target;

        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL;
}