/*
 * ARM implementation of KVM hooks, 32 bit specific code.
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "internals.h"
#include "hw/arm/arm.h"

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int i, ret, fdarray[3];
    uint32_t midr, id_pfr0, id_isar0, mvfr1;
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        QEMU_KVM_ARM_TARGET_CORTEX_A15,
        QEMU_KVM_ARM_TARGET_NONE
    };
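    /*
     * cpus_to_try is terminated by QEMU_KVM_ARM_TARGET_NONE;
     * kvm_arm_create_scratch_host_vcpu() falls back to trying these
     * targets in order with KVM_ARM_VCPU_INIT when the kernel does not
     * provide the PREFERRED_TARGET ioctl.
     */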
    struct kvm_vcpu_init init;
    struct kvm_one_reg idregs[] = {
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
                  | ENCODE_CP_REG(15, 0, 0, 0, 0, 0, 0),
            .addr = (uintptr_t)&midr,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
                  | ENCODE_CP_REG(15, 0, 0, 0, 1, 0, 0),
            .addr = (uintptr_t)&id_pfr0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
                  | ENCODE_CP_REG(15, 0, 0, 0, 2, 0, 0),
            .addr = (uintptr_t)&id_isar0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
                  | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1,
            .addr = (uintptr_t)&mvfr1,
        },
    };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;

    /* This is not strictly blessed by the device tree binding docs yet,
     * but in practice the kernel does not care about this string so
     * there is no point maintaining a KVM_ARM_TARGET_* -> string table.
     */
    ahcc->dtb_compatible = "arm,arm-v7";

    for (i = 0; i < ARRAY_SIZE(idregs); i++) {
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &idregs[i]);
        if (ret) {
            break;
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (ret) {
        return false;
    }

    /* Now we've retrieved all the register information we can
     * set the feature bits based on the ID register fields.
     * We can assume any KVM supporting CPU is at least a v7
     * with VFPv3, LPAE and the generic timers; this in turn implies
     * most of the other feature bits, but a few must be tested.
     */
    set_feature(&features, ARM_FEATURE_V7);
    set_feature(&features, ARM_FEATURE_VFP3);
    set_feature(&features, ARM_FEATURE_LPAE);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);

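    /*
     * ID_ISAR0 bits [27:24] describe the divide instructions: 1 means
     * SDIV/UDIV are implemented in the Thumb instruction set only, 2 means
     * they are implemented in both the ARM and Thumb instruction sets.
     */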
    switch (extract32(id_isar0, 24, 4)) {
    case 1:
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    case 2:
        set_feature(&features, ARM_FEATURE_ARM_DIV);
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    default:
        break;
    }

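    /*
     * Similarly, ID_PFR0 indicates whether ThumbEE is implemented, and the
     * MVFR1 fields tested below report half-precision, Advanced SIMD (Neon)
     * and fused multiply-accumulate support respectively.
     */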
    if (extract32(id_pfr0, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_THUMB2EE);
    }
    if (extract32(mvfr1, 20, 4) == 1) {
        set_feature(&features, ARM_FEATURE_VFP_FP16);
    }
    if (extract32(mvfr1, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_NEON);
    }
    if (extract32(mvfr1, 28, 4) == 1) {
        /* FMAC support implies VFPv4 */
        set_feature(&features, ARM_FEATURE_VFP4);
    }

    ahcc->features = features;

    return true;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core reg we sync by
     * hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM_VFP:
        return false;
    default:
        return true;
    }
}

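/*
 * MPIDR is cp15 c0, c0 with opc1 = 0, opc2 = 5 (MRC p15, 0, <Rt>, c0, c0, 5);
 * its low 24 bits hold the Aff2/Aff1/Aff0 affinity fields that form the
 * hardware CPU id.
 */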
#define ARM_MPIDR_HWID_BITMASK 0xFFFFFF
#define ARM_CPU_ID_MPIDR 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t v;
    uint32_t mpidr;
    struct kvm_one_reg r;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
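    /*
     * If the kernel can emulate PSCI 0.2 in-kernel, enable it for this vcpu
     * and record psci_version = 2 so the machine code that builds the
     * guest's device tree can advertise PSCI 0.2.
     */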
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /* Query the kernel to make sure it supports 32 VFP
     * registers: QEMU's "cortex-a15" CPU is always a
     * VFP-D32 core. The simplest way to do this is just
     * to attempt to read register d31.
     */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
    r.addr = (uintptr_t)(&v);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret == -ENOENT) {
        return -EINVAL;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM_CP15_REG32(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM_MPIDR_HWID_BITMASK;

    return kvm_arm_init_cpreg_list(cpu);
}

typedef struct Reg {
    uint64_t id;
    int offset;
} Reg;

#define COREREG(KERNELNAME, QEMUFIELD)                       \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)                     \
    }
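/*
 * For example, COREREG(usr_regs.uregs[0], regs[0]) pairs the kernel id
 * KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE |
 * KVM_REG_ARM_CORE_REG(usr_regs.uregs[0]) (KVM_REG_ARM_CORE_REG() turns a
 * struct kvm_regs field name into the kernel's core register index) with
 * offsetof(CPUARMState, regs[0]), i.e. where QEMU stores the same register.
 */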

#define VFPSYSREG(R)                                       \
    {                                                      \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                               \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])      \
    }

/* Like COREREG, but handle fields which are in a uint64_t in CPUARMState. */
#define COREREG64(KERNELNAME, QEMUFIELD)                     \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetoflow32(CPUARMState, QEMUFIELD)                \
    }
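/*
 * The banked SPSRs are uint64_t in CPUARMState but the 32-bit kernel view
 * only transfers 32 bits, so offsetoflow32() points the transfer at the low
 * half of the field regardless of host endianness.
 */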

static const Reg regs[] = {
    /* R0_usr .. R14_usr */
    COREREG(usr_regs.uregs[0], regs[0]),
    COREREG(usr_regs.uregs[1], regs[1]),
    COREREG(usr_regs.uregs[2], regs[2]),
    COREREG(usr_regs.uregs[3], regs[3]),
    COREREG(usr_regs.uregs[4], regs[4]),
    COREREG(usr_regs.uregs[5], regs[5]),
    COREREG(usr_regs.uregs[6], regs[6]),
    COREREG(usr_regs.uregs[7], regs[7]),
    COREREG(usr_regs.uregs[8], usr_regs[0]),
    COREREG(usr_regs.uregs[9], usr_regs[1]),
    COREREG(usr_regs.uregs[10], usr_regs[2]),
    COREREG(usr_regs.uregs[11], usr_regs[3]),
    COREREG(usr_regs.uregs[12], usr_regs[4]),
    COREREG(usr_regs.uregs[13], banked_r13[0]),
    COREREG(usr_regs.uregs[14], banked_r14[0]),
    /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
    COREREG(svc_regs[0], banked_r13[1]),
    COREREG(svc_regs[1], banked_r14[1]),
    COREREG64(svc_regs[2], banked_spsr[1]),
    COREREG(abt_regs[0], banked_r13[2]),
    COREREG(abt_regs[1], banked_r14[2]),
    COREREG64(abt_regs[2], banked_spsr[2]),
    COREREG(und_regs[0], banked_r13[3]),
    COREREG(und_regs[1], banked_r14[3]),
    COREREG64(und_regs[2], banked_spsr[3]),
    COREREG(irq_regs[0], banked_r13[4]),
    COREREG(irq_regs[1], banked_r14[4]),
    COREREG64(irq_regs[2], banked_spsr[4]),
    /* R8_fiq .. R14_fiq and SPSR_fiq */
    COREREG(fiq_regs[0], fiq_regs[0]),
    COREREG(fiq_regs[1], fiq_regs[1]),
    COREREG(fiq_regs[2], fiq_regs[2]),
    COREREG(fiq_regs[3], fiq_regs[3]),
    COREREG(fiq_regs[4], fiq_regs[4]),
    COREREG(fiq_regs[5], banked_r13[5]),
    COREREG(fiq_regs[6], banked_r14[5]),
    COREREG64(fiq_regs[7], banked_spsr[5]),
    /* R15 */
    COREREG(usr_regs.uregs[15], regs[15]),
    /* VFP system registers */
    VFPSYSREG(FPSID),
    VFPSYSREG(MVFR1),
    VFPSYSREG(MVFR0),
    VFPSYSREG(FPEXC),
    VFPSYSREG(FPINST),
    VFPSYSREG(FPINST2),
};

int kvm_arch_put_registers(CPUState *cs, int level)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    /* Make sure the banked regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    }
    env->banked_r13[bn] = env->regs[13];
    env->banked_r14[bn] = env->regs[14];
    env->banked_spsr[bn] = env->spsr;

    /* Now we can safely copy stuff down to the kernel */
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
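    /*
     * The CPSR is not stored as one CPUARMState field: cpsr_read() assembles
     * it from the separate flag fields and cpsr_write() splits it back out,
     * so it is transferred via a temporary here.
     */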
    cpsr = cpsr_read(env);
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    fpscr = vfp_get_fpscr(env);
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* Note that we do not call write_cpustate_to_list()
     * here, so we are only writing the tuple list back to
     * KVM. This is safe because nothing can change the
     * CPUARMState cp15 fields (in particular gdb accesses cannot)
     * and so there are no changes to sync. In fact syncing would
     * be wrong at this point: for a constant register where TCG and
     * KVM disagree about its value, the preceding write_list_to_cpustate()
     * would not have had any effect on the CPUARMState value (since the
     * register is read-only), and a write_cpustate_to_list() here would
     * then try to write the TCG value back into KVM -- this would either
     * fail or incorrectly change the value the guest sees.
     *
     * If we ever want to allow the user to modify cp15 registers via
     * the gdb stub, we would need to be more clever here (for instance
     * tracking the set of registers kvm_arch_get_registers() successfully
     * managed to update the CPUARMState with, and only allowing those
     * to be written back up into the kernel).
     */
    if (!write_list_to_kvmstate(cpu)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    cpsr_write(env, cpsr, 0xffffffff);
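    /*
     * Writing the CPSR with an all-ones mask also updates the mode bits, so
     * bank_number() below selects the bank matching the mode KVM reported.
     */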

    /* Make sure the current mode regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    }
    env->regs[13] = env->banked_r13[bn];
    env->regs[14] = env->banked_r14[bn];
    env->spsr = env->banked_spsr[bn];

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
        KVM_REG_ARM_VFP_FPSCR;
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    vfp_set_fpscr(env, fpscr);

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    return 0;
}