/*
 * ARM implementation of KVM hooks, 32 bit specific code.
 *
 * Copyright Christoffer Dall 2009-2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
#include "hw/arm/arm.h"
#include "qemu/log.h"

static inline void set_feature(uint64_t *features, int feature)
{
    *features |= 1ULL << feature;
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int i, ret, fdarray[3];
    uint32_t midr, id_pfr0, id_isar0, mvfr1;
    uint64_t features = 0;
    /* Old kernels may not know about the PREFERRED_TARGET ioctl; however
     * we know such kernels only support creating one kind of guest CPU,
     * which is therefore their preferred CPU type.
     */
    static const uint32_t cpus_to_try[] = {
        QEMU_KVM_ARM_TARGET_CORTEX_A15,
        QEMU_KVM_ARM_TARGET_NONE
    };
    struct kvm_vcpu_init init;
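    /* ID registers read back from the scratch vcpu: MIDR, ID_PFR0,
     * ID_ISAR0 and MVFR1, stored into the variables named by the
     * .addr fields below.
     */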
    struct kvm_one_reg idregs[] = {
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
                  | ENCODE_CP_REG(15, 0, 0, 0, 0, 0, 0),
            .addr = (uintptr_t)&midr,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
                  | ENCODE_CP_REG(15, 0, 0, 0, 1, 0, 0),
            .addr = (uintptr_t)&id_pfr0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
                  | ENCODE_CP_REG(15, 0, 0, 0, 2, 0, 0),
            .addr = (uintptr_t)&id_isar0,
        },
        {
            .id = KVM_REG_ARM | KVM_REG_SIZE_U32
                  | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1,
            .addr = (uintptr_t)&mvfr1,
        },
    };

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcc->target = init.target;

    /* This is not strictly blessed by the device tree binding docs yet,
     * but in practice the kernel does not care about this string so
     * there is no point maintaining a KVM_ARM_TARGET_* -> string table.
     */
    ahcc->dtb_compatible = "arm,arm-v7";

    for (i = 0; i < ARRAY_SIZE(idregs); i++) {
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &idregs[i]);
        if (ret) {
            break;
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (ret) {
        return false;
    }

    /* Now we've retrieved all the register information, we can
     * set the feature bits based on the ID register fields.
     * We can assume any KVM-supporting CPU is at least a v7
     * with VFPv3, LPAE and the generic timers; this in turn implies
     * most of the other feature bits, but a few must be tested.
     */
    set_feature(&features, ARM_FEATURE_V7);
    set_feature(&features, ARM_FEATURE_VFP3);
    set_feature(&features, ARM_FEATURE_LPAE);
    set_feature(&features, ARM_FEATURE_GENERIC_TIMER);

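    /* ID_ISAR0[27:24] is the Divide field: 1 means SDIV/UDIV are
     * implemented in the Thumb instruction set only, 2 means they are
     * also available in the ARM instruction set.
     */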
    switch (extract32(id_isar0, 24, 4)) {
    case 1:
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    case 2:
        set_feature(&features, ARM_FEATURE_ARM_DIV);
        set_feature(&features, ARM_FEATURE_THUMB_DIV);
        break;
    default:
        break;
    }

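    /* ID_PFR0[15:12] is the State3 field, which indicates ThumbEE support.
     * The MVFR1 fields checked below advertise (respectively) half-precision
     * support, Advanced SIMD (Neon) and fused multiply-accumulate support.
     */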
    if (extract32(id_pfr0, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_THUMB2EE);
    }
    if (extract32(mvfr1, 20, 4) == 1) {
        set_feature(&features, ARM_FEATURE_VFP_FP16);
    }
    if (extract32(mvfr1, 12, 4) == 1) {
        set_feature(&features, ARM_FEATURE_NEON);
    }
    if (extract32(mvfr1, 28, 4) == 1) {
        /* FMAC support implies VFPv4 */
        set_feature(&features, ARM_FEATURE_VFP4);
    }

    ahcc->features = features;

    return true;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (i.e. is not a core reg we sync by
     * hand in kvm_arch_get/put_registers()).
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM_VFP:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All coprocessor registers not listed in the following table are assumed to
 * be of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
};

int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

#define ARM_CPU_ID_MPIDR 0, 0, 0, 5

int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t v;
    uint32_t mpidr;
    struct kvm_one_reg r;
    ARMCPU *cpu = ARM_CPU(cs);

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
        fprintf(stderr, "KVM is not supported for this guest CPU type\n");
        return -EINVAL;
    }

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cpu->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
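    /* Prefer the in-kernel PSCI 0.2 implementation when the host offers it,
     * recording the choice in cpu->psci_version so that the guest device
     * tree can advertise PSCI 0.2 rather than 0.1.
     */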
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = 2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    /* Query the kernel to make sure it supports 32 VFP
     * registers: QEMU's "cortex-a15" CPU is always a
     * VFP-D32 core. The simplest way to do this is just
     * to attempt to read register d31.
     */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
    r.addr = (uintptr_t)(&v);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
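    /* -ENOENT means d31 does not exist, i.e. the host VFP unit only has
     * 16 double registers, which the cortex-a15 model cannot describe.
     */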
    if (ret == -ENOENT) {
        return -EINVAL;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by QEMU.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM_CP15_REG32(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;

    return kvm_arm_init_cpreg_list(cpu);
}

typedef struct Reg {
    uint64_t id;
    int offset;
} Reg;

#define COREREG(KERNELNAME, QEMUFIELD)                       \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetof(CPUARMState, QEMUFIELD)                     \
    }

#define VFPSYSREG(R)                                       \
    {                                                      \
        KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
        KVM_REG_ARM_VFP_##R,                               \
        offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R])      \
    }

/* Like COREREG, but handle fields which are in a uint64_t in CPUARMState. */
#define COREREG64(KERNELNAME, QEMUFIELD)                     \
    {                                                        \
        KVM_REG_ARM | KVM_REG_SIZE_U32 |                     \
        KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
        offsetoflow32(CPUARMState, QEMUFIELD)                \
    }

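/* Mapping between the kernel's core register IDs and the corresponding
 * CPUARMState fields, used below to copy the core register state between
 * QEMU and KVM in both directions. For example,
 *   COREREG(usr_regs.uregs[0], regs[0])
 * pairs the 32-bit core register ID derived from the struct kvm_regs member
 * usr_regs.uregs[0] with offsetof(CPUARMState, regs[0]).
 */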
static const Reg regs[] = {
    /* R0_usr .. R14_usr */
    COREREG(usr_regs.uregs[0], regs[0]),
    COREREG(usr_regs.uregs[1], regs[1]),
    COREREG(usr_regs.uregs[2], regs[2]),
    COREREG(usr_regs.uregs[3], regs[3]),
    COREREG(usr_regs.uregs[4], regs[4]),
    COREREG(usr_regs.uregs[5], regs[5]),
    COREREG(usr_regs.uregs[6], regs[6]),
    COREREG(usr_regs.uregs[7], regs[7]),
    COREREG(usr_regs.uregs[8], usr_regs[0]),
    COREREG(usr_regs.uregs[9], usr_regs[1]),
    COREREG(usr_regs.uregs[10], usr_regs[2]),
    COREREG(usr_regs.uregs[11], usr_regs[3]),
    COREREG(usr_regs.uregs[12], usr_regs[4]),
    COREREG(usr_regs.uregs[13], banked_r13[BANK_USRSYS]),
    COREREG(usr_regs.uregs[14], banked_r14[BANK_USRSYS]),
    /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
    COREREG(svc_regs[0], banked_r13[BANK_SVC]),
    COREREG(svc_regs[1], banked_r14[BANK_SVC]),
    COREREG64(svc_regs[2], banked_spsr[BANK_SVC]),
    COREREG(abt_regs[0], banked_r13[BANK_ABT]),
    COREREG(abt_regs[1], banked_r14[BANK_ABT]),
    COREREG64(abt_regs[2], banked_spsr[BANK_ABT]),
    COREREG(und_regs[0], banked_r13[BANK_UND]),
    COREREG(und_regs[1], banked_r14[BANK_UND]),
    COREREG64(und_regs[2], banked_spsr[BANK_UND]),
    COREREG(irq_regs[0], banked_r13[BANK_IRQ]),
    COREREG(irq_regs[1], banked_r14[BANK_IRQ]),
    COREREG64(irq_regs[2], banked_spsr[BANK_IRQ]),
    /* R8_fiq .. R14_fiq and SPSR_fiq */
    COREREG(fiq_regs[0], fiq_regs[0]),
    COREREG(fiq_regs[1], fiq_regs[1]),
    COREREG(fiq_regs[2], fiq_regs[2]),
    COREREG(fiq_regs[3], fiq_regs[3]),
    COREREG(fiq_regs[4], fiq_regs[4]),
    COREREG(fiq_regs[5], banked_r13[BANK_FIQ]),
    COREREG(fiq_regs[6], banked_r14[BANK_FIQ]),
    COREREG64(fiq_regs[7], banked_spsr[BANK_FIQ]),
    /* R15 */
    COREREG(usr_regs.uregs[15], regs[15]),
    /* VFP system registers */
    VFPSYSREG(FPSID),
    VFPSYSREG(MVFR1),
    VFPSYSREG(MVFR0),
    VFPSYSREG(FPEXC),
    VFPSYSREG(FPINST),
    VFPSYSREG(FPINST2),
};

int kvm_arch_put_registers(CPUState *cs, int level)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    /* Make sure the banked regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
    }
    env->banked_r13[bn] = env->regs[13];
    env->banked_r14[bn] = env->regs[14];
    env->banked_spsr[bn] = env->spsr;

    /* Now we can safely copy stuff down to the kernel */
    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
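    /* The CPSR is kept split across several CPUARMState fields, so it has
     * to be assembled with cpsr_read() rather than copied by offset.
     */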
    cpsr = cpsr_read(env);
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
           KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* VFP registers */
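    /* d0..d31 have consecutive 64-bit register IDs, so r.id is simply
     * incremented on each pass round the loop.
     */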
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
           KVM_REG_ARM_VFP_FPSCR;
    fpscr = vfp_get_fpscr(env);
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
    if (ret) {
        return ret;
    }

    /* Note that we do not call write_cpustate_to_list()
     * here, so we are only writing the tuple list back to
     * KVM. This is safe because nothing can change the
     * CPUARMState cp15 fields (in particular gdb accesses cannot)
     * and so there are no changes to sync. In fact syncing would
     * be wrong at this point: for a constant register where TCG and
     * KVM disagree about its value, the preceding write_list_to_cpustate()
     * would not have had any effect on the CPUARMState value (since the
     * register is read-only), and a write_cpustate_to_list() here would
     * then try to write the TCG value back into KVM -- this would either
     * fail or incorrectly change the value the guest sees.
     *
     * If we ever want to allow the user to modify cp15 registers via
     * the gdb stub, we would need to be more clever here (for instance
     * tracking the set of registers kvm_arch_get_registers() successfully
     * managed to update the CPUARMState with, and only allowing those
     * to be written back up into the kernel).
     */
    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    struct kvm_one_reg r;
    int mode, bn;
    int ret, i;
    uint32_t cpsr, fpscr;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r.id = regs[i].id;
        r.addr = (uintptr_t)(env) + regs[i].offset;
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
    }

    /* Special cases which aren't a single CPUARMState field */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
           KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
    r.addr = (uintptr_t)(&cpsr);
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
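    /* CPSRWriteRaw avoids the bank-switching side effects of a normal
     * CPSR write; the current mode's banked registers are synced by
     * hand just below.
     */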
    cpsr_write(env, cpsr, 0xffffffff, CPSRWriteRaw);

    /* Make sure the current mode regs are properly set */
    mode = env->uncached_cpsr & CPSR_M;
    bn = bank_number(mode);
    if (mode == ARM_CPU_MODE_FIQ) {
        memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    } else {
        memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    }
    env->regs[13] = env->banked_r13[bn];
    env->regs[14] = env->banked_r14[bn];
    env->spsr = env->banked_spsr[bn];

    /* VFP registers */
    r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
    for (i = 0; i < 32; i++) {
        r.addr = (uintptr_t)(&env->vfp.regs[i]);
        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
        if (ret) {
            return ret;
        }
        r.id++;
    }

    r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
           KVM_REG_ARM_VFP_FPSCR;
    r.addr = (uintptr_t)&fpscr;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
    if (ret) {
        return ret;
    }
    vfp_set_fpscr(env, fpscr);

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    return 0;
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return -EINVAL;
}

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
    return false;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return -EINVAL;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return false;
}

int kvm_arm_pmu_create(CPUState *cs, int irq)
{
    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
    return 0;
}