]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/kvm64.c
Merge remote-tracking branch 'remotes/lalrae/tags/mips-20151124' into staging
[mirror_qemu.git] / target-arm / kvm64.c
CommitLineData
26861c7c
MH
1/*
2 * ARM implementation of KVM hooks, 64 bit specific code
3 *
4 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8 *
9 */
10
11#include <stdio.h>
12#include <sys/types.h>
13#include <sys/ioctl.h>
14#include <sys/mman.h>
15
16#include <linux/kvm.h>
17
0e4b5869 18#include "config-host.h"
26861c7c
MH
19#include "qemu-common.h"
20#include "qemu/timer.h"
21#include "sysemu/sysemu.h"
22#include "sysemu/kvm.h"
23#include "kvm_arm.h"
24#include "cpu.h"
9208b961 25#include "internals.h"
26861c7c
MH
26#include "hw/arm/arm.h"
27
/* Set a single feature bit in the 64-bit feature bitmap. */
static inline void set_feature(uint64_t *features, int feature)
{
    const uint64_t bit = 1ULL << feature;

    *features |= bit;
}
32
33bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
34{
35 /* Identify the feature bits corresponding to the host CPU, and
36 * fill out the ARMHostCPUClass fields accordingly. To do this
37 * we have to create a scratch VM, create a single CPU inside it,
38 * and then query that CPU for the relevant ID registers.
39 * For AArch64 we currently don't care about ID registers at
40 * all; we just want to know the CPU type.
41 */
42 int fdarray[3];
43 uint64_t features = 0;
44 /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
45 * we know these will only support creating one kind of guest CPU,
46 * which is its preferred CPU type. Fortunately these old kernels
47 * support only a very limited number of CPUs.
48 */
49 static const uint32_t cpus_to_try[] = {
50 KVM_ARM_TARGET_AEM_V8,
51 KVM_ARM_TARGET_FOUNDATION_V8,
52 KVM_ARM_TARGET_CORTEX_A57,
53 QEMU_KVM_ARM_TARGET_NONE
54 };
55 struct kvm_vcpu_init init;
56
57 if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
58 return false;
59 }
60
61 ahcc->target = init.target;
62 ahcc->dtb_compatible = "arm,arm-v8";
63
64 kvm_arm_destroy_scratch_host_vcpu(fdarray);
65
66 /* We can assume any KVM supporting CPU is at least a v8
67 * with VFPv4+Neon; this in turn implies most of the other
68 * feature bits.
69 */
70 set_feature(&features, ARM_FEATURE_V8);
71 set_feature(&features, ARM_FEATURE_VFP4);
72 set_feature(&features, ARM_FEATURE_NEON);
73 set_feature(&features, ARM_FEATURE_AARCH64);
74
75 ahcc->features = features;
76
77 return true;
78}
79
eb5e1d3c
PF
80#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
81
26861c7c
MH
82int kvm_arch_init_vcpu(CPUState *cs)
83{
26861c7c 84 int ret;
eb5e1d3c 85 uint64_t mpidr;
228d5e04 86 ARMCPU *cpu = ARM_CPU(cs);
26861c7c
MH
87
88 if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
56073970 89 !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
26861c7c
MH
90 fprintf(stderr, "KVM is not supported for this guest CPU type\n");
91 return -EINVAL;
92 }
93
228d5e04
PS
94 /* Determine init features for this CPU */
95 memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
26861c7c 96 if (cpu->start_powered_off) {
228d5e04
PS
97 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
98 }
7cd62e53 99 if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
dd032e34 100 cpu->psci_version = 2;
7cd62e53
PS
101 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
102 }
56073970
GB
103 if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
104 cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
105 }
228d5e04
PS
106
107 /* Do KVM_ARM_VCPU_INIT ioctl */
108 ret = kvm_arm_vcpu_init(cs);
109 if (ret) {
110 return ret;
26861c7c 111 }
26861c7c 112
eb5e1d3c
PF
113 /*
114 * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
115 * Currently KVM has its own idea about MPIDR assignment, so we
116 * override our defaults with what we get from KVM.
117 */
118 ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
119 if (ret) {
120 return ret;
121 }
0f4a9e45 122 cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;
eb5e1d3c 123
38df27c8
AB
124 return kvm_arm_init_cpreg_list(cpu);
125}
26861c7c 126
38df27c8
AB
127bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
128{
129 /* Return true if the regidx is a register we should synchronize
130 * via the cpreg_tuples array (ie is not a core reg we sync by
131 * hand in kvm_arch_get/put_registers())
132 */
133 switch (regidx & KVM_REG_ARM_COPROC_MASK) {
134 case KVM_REG_ARM_CORE:
135 return false;
136 default:
137 return true;
138 }
26861c7c
MH
139}
140
4b7a6bf4
CD
/* Pairs a KVM system-register index with the coldest migration state
 * level (KVM_PUT_*_STATE) at which it must be written back to KVM.
 */
typedef struct CPRegStateLevel {
    uint64_t regidx;  /* KVM_REG_* encoding of the system register */
    int level;        /* one of the KVM_PUT_*_STATE levels */
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};
154
155int kvm_arm_cpreg_level(uint64_t regidx)
156{
157 int i;
158
159 for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
160 const CPRegStateLevel *l = &non_runtime_cpregs[i];
161 if (l->regidx == regidx) {
162 return l->level;
163 }
164 }
165
166 return KVM_PUT_RUNTIME_STATE;
167}
168
26861c7c
MH
/* Build the KVM register ID for a 64-bit AArch64 core register. */
#define AARCH64_CORE_REG(x)      (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/* Build the KVM register ID for a 128-bit SIMD vector register. */
#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/* Build the KVM register ID for a 32-bit FP control/status register. */
#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
177
26861c7c
MH
178int kvm_arch_put_registers(CPUState *cs, int level)
179{
180 struct kvm_one_reg reg;
0e4b5869 181 uint32_t fpr;
26861c7c
MH
182 uint64_t val;
183 int i;
184 int ret;
25b9fb10 185 unsigned int el;
26861c7c
MH
186
187 ARMCPU *cpu = ARM_CPU(cs);
188 CPUARMState *env = &cpu->env;
189
56073970
GB
190 /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
191 * AArch64 registers before pushing them out to 64-bit KVM.
192 */
193 if (!is_a64(env)) {
194 aarch64_sync_32_to_64(env);
195 }
196
26861c7c
MH
197 for (i = 0; i < 31; i++) {
198 reg.id = AARCH64_CORE_REG(regs.regs[i]);
199 reg.addr = (uintptr_t) &env->xregs[i];
200 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
201 if (ret) {
202 return ret;
203 }
204 }
205
f502cfc2
PM
206 /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
207 * QEMU side we keep the current SP in xregs[31] as well.
208 */
9208b961 209 aarch64_save_sp(env, 1);
f502cfc2 210
26861c7c 211 reg.id = AARCH64_CORE_REG(regs.sp);
f502cfc2
PM
212 reg.addr = (uintptr_t) &env->sp_el[0];
213 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
214 if (ret) {
215 return ret;
216 }
217
218 reg.id = AARCH64_CORE_REG(sp_el1);
219 reg.addr = (uintptr_t) &env->sp_el[1];
26861c7c
MH
220 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
221 if (ret) {
222 return ret;
223 }
224
225 /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
56073970
GB
226 if (is_a64(env)) {
227 val = pstate_read(env);
228 } else {
229 val = cpsr_read(env);
230 }
26861c7c
MH
231 reg.id = AARCH64_CORE_REG(regs.pstate);
232 reg.addr = (uintptr_t) &val;
233 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
234 if (ret) {
235 return ret;
236 }
237
238 reg.id = AARCH64_CORE_REG(regs.pc);
239 reg.addr = (uintptr_t) &env->pc;
240 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
241 if (ret) {
242 return ret;
243 }
244
a0618a19 245 reg.id = AARCH64_CORE_REG(elr_el1);
6947f059 246 reg.addr = (uintptr_t) &env->elr_el[1];
a0618a19
PM
247 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
248 if (ret) {
249 return ret;
250 }
251
25b9fb10
AB
252 /* Saved Program State Registers
253 *
254 * Before we restore from the banked_spsr[] array we need to
255 * ensure that any modifications to env->spsr are correctly
256 * reflected in the banks.
257 */
258 el = arm_current_el(env);
259 if (el > 0 && !is_a64(env)) {
260 i = bank_number(env->uncached_cpsr & CPSR_M);
261 env->banked_spsr[i] = env->spsr;
262 }
263
264 /* KVM 0-4 map to QEMU banks 1-5 */
a65f1de9
PM
265 for (i = 0; i < KVM_NR_SPSR; i++) {
266 reg.id = AARCH64_CORE_REG(spsr[i]);
25b9fb10 267 reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
a65f1de9
PM
268 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
269 if (ret) {
270 return ret;
271 }
272 }
273
0e4b5869
AB
274 /* Advanced SIMD and FP registers
275 * We map Qn = regs[2n+1]:regs[2n]
276 */
277 for (i = 0; i < 32; i++) {
278 int rd = i << 1;
279 uint64_t fp_val[2];
280#ifdef HOST_WORDS_BIGENDIAN
281 fp_val[0] = env->vfp.regs[rd + 1];
282 fp_val[1] = env->vfp.regs[rd];
283#else
284 fp_val[1] = env->vfp.regs[rd + 1];
285 fp_val[0] = env->vfp.regs[rd];
286#endif
287 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
288 reg.addr = (uintptr_t)(&fp_val);
289 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
290 if (ret) {
291 return ret;
292 }
293 }
294
295 reg.addr = (uintptr_t)(&fpr);
296 fpr = vfp_get_fpsr(env);
297 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
298 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
299 if (ret) {
300 return ret;
301 }
302
303 fpr = vfp_get_fpcr(env);
304 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
305 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
306 if (ret) {
307 return ret;
308 }
309
4b7a6bf4 310 if (!write_list_to_kvmstate(cpu, level)) {
568bab1f
PS
311 return EINVAL;
312 }
313
1a1753f7
AB
314 kvm_arm_sync_mpstate_to_kvm(cpu);
315
26861c7c
MH
316 return ret;
317}
318
319int kvm_arch_get_registers(CPUState *cs)
320{
321 struct kvm_one_reg reg;
322 uint64_t val;
0e4b5869 323 uint32_t fpr;
25b9fb10 324 unsigned int el;
26861c7c
MH
325 int i;
326 int ret;
327
328 ARMCPU *cpu = ARM_CPU(cs);
329 CPUARMState *env = &cpu->env;
330
331 for (i = 0; i < 31; i++) {
332 reg.id = AARCH64_CORE_REG(regs.regs[i]);
333 reg.addr = (uintptr_t) &env->xregs[i];
334 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
335 if (ret) {
336 return ret;
337 }
338 }
339
340 reg.id = AARCH64_CORE_REG(regs.sp);
f502cfc2
PM
341 reg.addr = (uintptr_t) &env->sp_el[0];
342 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
343 if (ret) {
344 return ret;
345 }
346
347 reg.id = AARCH64_CORE_REG(sp_el1);
348 reg.addr = (uintptr_t) &env->sp_el[1];
26861c7c
MH
349 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
350 if (ret) {
351 return ret;
352 }
353
354 reg.id = AARCH64_CORE_REG(regs.pstate);
355 reg.addr = (uintptr_t) &val;
356 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
357 if (ret) {
358 return ret;
359 }
56073970
GB
360
361 env->aarch64 = ((val & PSTATE_nRW) == 0);
362 if (is_a64(env)) {
363 pstate_write(env, val);
364 } else {
365 env->uncached_cpsr = val & CPSR_M;
366 cpsr_write(env, val, 0xffffffff);
367 }
26861c7c 368
f502cfc2
PM
369 /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
370 * QEMU side we keep the current SP in xregs[31] as well.
371 */
9208b961 372 aarch64_restore_sp(env, 1);
f502cfc2 373
26861c7c
MH
374 reg.id = AARCH64_CORE_REG(regs.pc);
375 reg.addr = (uintptr_t) &env->pc;
376 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
377 if (ret) {
378 return ret;
379 }
380
56073970
GB
381 /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
382 * incoming AArch64 regs received from 64-bit KVM.
383 * We must perform this after all of the registers have been acquired from
384 * the kernel.
385 */
386 if (!is_a64(env)) {
387 aarch64_sync_64_to_32(env);
388 }
389
a0618a19 390 reg.id = AARCH64_CORE_REG(elr_el1);
6947f059 391 reg.addr = (uintptr_t) &env->elr_el[1];
a0618a19
PM
392 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
393 if (ret) {
394 return ret;
395 }
396
25b9fb10
AB
397 /* Fetch the SPSR registers
398 *
399 * KVM SPSRs 0-4 map to QEMU banks 1-5
400 */
a65f1de9
PM
401 for (i = 0; i < KVM_NR_SPSR; i++) {
402 reg.id = AARCH64_CORE_REG(spsr[i]);
25b9fb10 403 reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
a65f1de9
PM
404 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
405 if (ret) {
406 return ret;
407 }
408 }
409
25b9fb10
AB
410 el = arm_current_el(env);
411 if (el > 0 && !is_a64(env)) {
412 i = bank_number(env->uncached_cpsr & CPSR_M);
413 env->spsr = env->banked_spsr[i];
414 }
415
0e4b5869
AB
416 /* Advanced SIMD and FP registers
417 * We map Qn = regs[2n+1]:regs[2n]
418 */
419 for (i = 0; i < 32; i++) {
420 uint64_t fp_val[2];
421 reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
422 reg.addr = (uintptr_t)(&fp_val);
423 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
424 if (ret) {
425 return ret;
426 } else {
427 int rd = i << 1;
428#ifdef HOST_WORDS_BIGENDIAN
429 env->vfp.regs[rd + 1] = fp_val[0];
430 env->vfp.regs[rd] = fp_val[1];
431#else
432 env->vfp.regs[rd + 1] = fp_val[1];
433 env->vfp.regs[rd] = fp_val[0];
434#endif
435 }
436 }
437
438 reg.addr = (uintptr_t)(&fpr);
439 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
440 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
441 if (ret) {
442 return ret;
443 }
444 vfp_set_fpsr(env, fpr);
445
446 reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
447 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
448 if (ret) {
449 return ret;
450 }
451 vfp_set_fpcr(env, fpr);
452
568bab1f
PS
453 if (!write_kvmstate_to_list(cpu)) {
454 return EINVAL;
455 }
456 /* Note that it's OK to have registers which aren't in CPUState,
457 * so we can ignore a failure return here.
458 */
459 write_list_to_cpustate(cpu);
460
1a1753f7
AB
461 kvm_arm_sync_mpstate_to_qemu(cpu);
462
26861c7c
MH
463 /* TODO: other registers */
464 return ret;
465}