/*
 * ARM implementation of KVM hooks
 *
 * Copyright Christoffer Dall 2009-2010
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * Copyright Alex Bennée 2014, Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qom/object.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "internals.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "exec/gdbstub.h"
#include "hw/boards.h"
#include "qapi/visitor.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/ghes.h"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static bool cap_has_mp_state;
static bool cap_has_inject_serror_esr;
static bool cap_has_inject_ext_dabt;
/*
 * ARMHostCPUFeatures: information about the host CPU (identified
 * by asking the host kernel)
 */
typedef struct ARMHostCPUFeatures {
    ARMISARegisters isar;
    uint64_t features;
    uint32_t target;
    const char *dtb_compatible;
} ARMHostCPUFeatures;

static ARMHostCPUFeatures arm_host_cpu_features;
/*
 * Initialize (or reinitialize) the VCPU by invoking the
 * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature
 * bitmask specified in the CPUState.
 *
 * Returns: 0 if success else < 0 error code
 */
static int kvm_arm_vcpu_init(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    struct kvm_vcpu_init init;

    init.target = cpu->kvm_target;
    memcpy(init.features, cpu->kvm_init_features, sizeof(init.features));

    return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
}
/**
 * kvm_arm_vcpu_finalize:
 * @cs: CPUState
 * @feature: feature to finalize
 *
 * Finalizes the configuration of the specified VCPU feature by
 * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring
 * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of
 * KVM's API documentation.
 *
 * Returns: 0 if success else < 0 error code
 */
static int kvm_arm_vcpu_finalize(CPUState *cs, int feature)
{
    return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_FINALIZE, &feature);
}
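
/*
 * Illustrative usage, mirroring kvm_arch_init_vcpu() later in this file:
 * once KVM_ARM_VCPU_SVE has been set in the init features and the vector
 * lengths configured, the feature is completed with
 *
 *     ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
 */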
bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
                                      int *fdarray,
                                      struct kvm_vcpu_init *init)
{
    int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
    int max_vm_pa_size;

    kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        goto err;
    }
    max_vm_pa_size = ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
    if (max_vm_pa_size < 0) {
        max_vm_pa_size = 0;
    }
    do {
        vmfd = ioctl(kvmfd, KVM_CREATE_VM, max_vm_pa_size);
    } while (vmfd == -1 && errno == EINTR);
    if (vmfd < 0) {
        goto err;
    }
    cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
    if (cpufd < 0) {
        goto err;
    }

    if (!init) {
        /* Caller doesn't want the VCPU to be initialized, so skip it */
        goto finish;
    }

    if (init->target == -1) {
        struct kvm_vcpu_init preferred;

        ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred);
        if (!ret) {
            init->target = preferred.target;
        }
    }
    if (ret >= 0) {
        ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
        if (ret < 0) {
            goto err;
        }
    } else if (cpus_to_try) {
        /* Old kernel which doesn't know about the
         * PREFERRED_TARGET ioctl: we know it will only support
         * creating one kind of guest CPU which is its preferred
         * CPU type.
         */
        struct kvm_vcpu_init try;

        while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
            try.target = *cpus_to_try++;
            memcpy(try.features, init->features, sizeof(init->features));
            ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try);
            if (ret >= 0) {
                break;
            }
        }
        if (ret < 0) {
            goto err;
        }
        init->target = try.target;
    } else {
        /* Treat a NULL cpus_to_try argument the same as an empty
         * list, which means we will fail the call since this must
         * be an old kernel which doesn't support PREFERRED_TARGET.
         */
        goto err;
    }

finish:
    fdarray[0] = kvmfd;
    fdarray[1] = vmfd;
    fdarray[2] = cpufd;

    return true;

err:
    if (cpufd >= 0) {
        close(cpufd);
    }
    if (vmfd >= 0) {
        close(vmfd);
    }
    if (kvmfd >= 0) {
        close(kvmfd);
    }

    return false;
}
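
/*
 * On success fdarray[] holds { kvmfd, vmfd, cpufd }, so callers such as
 * kvm_arm_get_host_cpu_features() use fdarray[2] for per-vCPU ioctls,
 * and kvm_arm_destroy_scratch_host_vcpu() below closes the fds in
 * reverse creation order.
 */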
void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
{
    int i;

    for (i = 2; i >= 0; i--) {
        close(fdarray[i]);
    }
}
static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    int err;
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}
static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}
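
/*
 * Worked example: ARM64_SYS_REG(3, 0, 0, 4, 0) encodes op0=3, op1=0,
 * CRn=0, CRm=4, op2=0, i.e. ID_AA64PFR0_EL1; this is how
 * kvm_arm_get_host_cpu_features() below reads the host ID registers.
 */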
static bool kvm_arm_pauth_supported(void)
{
    return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) &&
            kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC));
}
static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    bool sve_supported;
    bool pmu_supported = false;
    uint64_t features = 0;
    int err;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    /*
     * Ask for SVE if supported, so that we can query ID_AA64ZFR0,
     * which is otherwise RAZ.
     */
    sve_supported = kvm_arm_sve_supported();
    if (sve_supported) {
        init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }

    /*
     * Ask for Pointer Authentication if supported, so that we get
     * the unsanitized field values for AA64ISAR1_EL1.
     */
    if (kvm_arm_pauth_supported()) {
        init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                             1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
    }

    if (kvm_arm_pmu_supported()) {
        init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
        pmu_supported = true;
    }

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Before v4.15, the kernel only exposed a limited number of system
         * registers, not including any of the interesting AArch64 ID regs.
         * For the most part we could leave these fields as zero with minimal
         * effect, since this does not affect the values seen by the guest.
         *
         * However, it could cause problems down the line for QEMU,
         * so provide a minimal v8.0 default.
         *
         * ??? Could read MIDR and use knowledge from cpu64.c.
         * ??? Could map a page of memory into our temp guest and
         *     run the tiniest of hand-crafted kernels to extract
         *     the values seen by the guest.
         * ??? Either of these sounds like too much effort just
         *     to work around running a modern host kernel.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
                              ARM64_SYS_REG(3, 0, 0, 4, 5));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
                              ARM64_SYS_REG(3, 0, 0, 5, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
                              ARM64_SYS_REG(3, 0, 0, 5, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2,
                              ARM64_SYS_REG(3, 0, 0, 6, 2));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 7, 2));

        /*
         * Note that if AArch32 support is not present in the host,
         * the AArch32 sysregs are present to be read, but will
         * return UNKNOWN values. This is neither better nor worse
         * than skipping the reads and leaving 0, as we must avoid
         * considering the values in every case.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 1, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
                              ARM64_SYS_REG(3, 0, 0, 1, 7));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
                              ARM64_SYS_REG(3, 0, 0, 2, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5,
                              ARM64_SYS_REG(3, 0, 0, 3, 6));

        /*
         * DBGDIDR is a bit complicated because the kernel doesn't
         * provide an accessor for it in 64-bit mode, which is what this
         * scratch VM is in, and there's no architected "64-bit sysreg
         * which reads the same as the 32-bit register" the way there is
         * for other ID registers. Instead we synthesize a value from the
         * AArch64 ID_AA64DFR0, the same way the kernel code in
         * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
         * We only do this if the CPU supports AArch32 at EL1.
         */
        if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
            int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
            int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
            int ctx_cmps =
                FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
            int version = 6; /* ARMv8 debug architecture */
            bool has_el3 =
                !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
            uint32_t dbgdidr = 0;

            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
            dbgdidr |= (1 << 15); /* RES1 bit */
            ahcf->isar.dbgdidr = dbgdidr;
        }

        if (pmu_supported) {
            /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
            err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
                                  ARM64_SYS_REG(3, 3, 9, 12, 0));
        }

        if (sve_supported) {
            /*
             * There is a range of kernels between kernel commit 73433762fcae
             * and f81cb2c3ad41 which have a bug where the kernel doesn't
             * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has
             * enabled SVE support, which resulted in an error rather than RAZ.
             * So only read the register if we set KVM_ARM_VCPU_SVE above.
             */
            err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
                                  ARM64_SYS_REG(3, 0, 0, 4, 4));
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /*
     * We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    features |= 1ULL << ARM_FEATURE_V8;
    features |= 1ULL << ARM_FEATURE_NEON;
    features |= 1ULL << ARM_FEATURE_AARCH64;
    features |= 1ULL << ARM_FEATURE_PMU;
    features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;

    ahcf->features = features;

    return true;
}
void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;

    if (!arm_host_cpu_features.dtb_compatible) {
        if (!kvm_enabled() ||
            !kvm_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /* We can't report this error yet, so flag that we need to
             * in arm_cpu_realizefn().
             */
            cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->kvm_target = arm_host_cpu_features.target;
    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    env->features = arm_host_cpu_features.features;
}
static bool kvm_no_adjvtime_get(Object *obj, Error **errp)
{
    return !ARM_CPU(obj)->kvm_adjvtime;
}

static void kvm_no_adjvtime_set(Object *obj, bool value, Error **errp)
{
    ARM_CPU(obj)->kvm_adjvtime = !value;
}

static bool kvm_steal_time_get(Object *obj, Error **errp)
{
    return ARM_CPU(obj)->kvm_steal_time != ON_OFF_AUTO_OFF;
}

static void kvm_steal_time_set(Object *obj, bool value, Error **errp)
{
    ARM_CPU(obj)->kvm_steal_time = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
}
/* KVM VCPU properties should be prefixed with "kvm-". */
void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    Object *obj = OBJECT(cpu);

    if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
        cpu->kvm_adjvtime = true;
        object_property_add_bool(obj, "kvm-no-adjvtime", kvm_no_adjvtime_get,
                                 kvm_no_adjvtime_set);
        object_property_set_description(obj, "kvm-no-adjvtime",
                                        "Set on to disable the adjustment of "
                                        "the virtual counter. VM stopped time "
                                        "will be counted.");
    }

    cpu->kvm_steal_time = ON_OFF_AUTO_AUTO;
    object_property_add_bool(obj, "kvm-steal-time", kvm_steal_time_get,
                             kvm_steal_time_set);
    object_property_set_description(obj, "kvm-steal-time",
                                    "Set off to disable KVM steal time.");
}
bool kvm_arm_pmu_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_PMU_V3);
}
int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
{
    KVMState *s = KVM_STATE(ms->accelerator);
    int ret;

    ret = kvm_check_extension(s, KVM_CAP_ARM_VM_IPA_SIZE);
    *fixed_ipa = ret <= 0;

    return ret > 0 ? ret : 40;
}
int kvm_arch_get_default_type(MachineState *ms)
{
    bool fixed_ipa;
    int size = kvm_arm_get_max_vm_ipa_size(ms, &fixed_ipa);
    return fixed_ipa ? 0 : size;
}
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    int ret = 0;
    /* For ARM, interrupt delivery is always asynchronous,
     * whether we are using an in-kernel VGIC or not.
     */
    kvm_async_interrupts_allowed = true;

    /*
     * PSCI wakes up secondary cores, so we always need to
     * have vCPUs waiting in kernel space
     */
    kvm_halt_in_kernel_allowed = true;

    cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);

    /* Check whether user space can specify guest syndrome value */
    cap_has_inject_serror_esr =
        kvm_check_extension(s, KVM_CAP_ARM_INJECT_SERROR_ESR);

    if (ms->smp.cpus > 256 &&
        !kvm_check_extension(s, KVM_CAP_ARM_IRQ_LINE_LAYOUT_2)) {
        error_report("Using more than 256 vcpus requires a host kernel "
                     "with KVM_CAP_ARM_IRQ_LINE_LAYOUT_2");
        ret = -EINVAL;
    }

    if (kvm_check_extension(s, KVM_CAP_ARM_NISV_TO_USER)) {
        if (kvm_vm_enable_cap(s, KVM_CAP_ARM_NISV_TO_USER, 0)) {
            error_report("Failed to enable KVM_CAP_ARM_NISV_TO_USER cap");
        } else {
            /* Set status for supporting the external dabt injection */
            cap_has_inject_ext_dabt = kvm_check_extension(s,
                                    KVM_CAP_ARM_INJECT_EXT_DABT);
        }
    }

    if (s->kvm_eager_split_size) {
        uint32_t sizes;

        sizes = kvm_vm_check_extension(s, KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES);
        if (!sizes) {
            s->kvm_eager_split_size = 0;
            warn_report("Eager Page Split support not available");
        } else if (!(s->kvm_eager_split_size & sizes)) {
            error_report("Eager Page Split requested chunk size not valid");
            ret = -EINVAL;
        } else {
            ret = kvm_vm_enable_cap(s, KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE, 0,
                                    s->kvm_eager_split_size);
            if (ret < 0) {
                error_report("Enabling of Eager Page Split failed: %s",
                             strerror(-ret));
            }
        }
    }

    max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);

    return ret;
}
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
/* We track all the KVM devices which need their memory addresses
 * passing to the kernel in a list of these structures.
 * When board init is complete we run through the list and
 * tell the kernel the base addresses of the memory regions.
 * We use a MemoryListener to track mapping and unmapping of
 * the regions during board creation, so the board models don't
 * need to do anything special for the KVM case.
 *
 * Sometimes the address must be OR'ed with some other fields
 * (for example for KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION).
 * @kda_addr_ormask aims at storing the value of those fields.
 */
typedef struct KVMDevice {
    struct kvm_arm_device_addr kda;
    struct kvm_device_attr kdattr;
    uint64_t kda_addr_ormask;
    MemoryRegion *mr;
    QSLIST_ENTRY(KVMDevice) entries;
    int dev_fd;
} KVMDevice;

static QSLIST_HEAD(, KVMDevice) kvm_devices_head;
static void kvm_arm_devlistener_add(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = section->offset_within_address_space;
        }
    }
}
static void kvm_arm_devlistener_del(MemoryListener *listener,
                                    MemoryRegionSection *section)
{
    KVMDevice *kd;

    QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
        if (section->mr == kd->mr) {
            kd->kda.addr = -1;
        }
    }
}
static MemoryListener devlistener = {
    .region_add = kvm_arm_devlistener_add,
    .region_del = kvm_arm_devlistener_del,
    .priority = MEMORY_LISTENER_PRIORITY_MIN,
};
static void kvm_arm_set_device_addr(KVMDevice *kd)
{
    struct kvm_device_attr *attr = &kd->kdattr;
    int ret;

    /* If the device control API is available and we have a device fd on the
     * KVMDevice struct, let's use the newer API
     */
    if (kd->dev_fd >= 0) {
        uint64_t addr = kd->kda.addr;

        addr |= kd->kda_addr_ormask;
        attr->addr = (uintptr_t)&addr;
        ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
    } else {
        ret = kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR, &kd->kda);
    }

    if (ret < 0) {
        fprintf(stderr, "Failed to set device address: %s\n",
                strerror(-ret));
        abort();
    }
}
static void kvm_arm_machine_init_done(Notifier *notifier, void *data)
{
    KVMDevice *kd, *tkd;

    QSLIST_FOREACH_SAFE(kd, &kvm_devices_head, entries, tkd) {
        if (kd->kda.addr != -1) {
            kvm_arm_set_device_addr(kd);
        }
        memory_region_unref(kd->mr);
        QSLIST_REMOVE_HEAD(&kvm_devices_head, entries);
        g_free(kd);
    }
    memory_listener_unregister(&devlistener);
}
static Notifier notify = {
    .notify = kvm_arm_machine_init_done,
};
void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
                             uint64_t attr, int dev_fd, uint64_t addr_ormask)
{
    KVMDevice *kd;

    if (!kvm_irqchip_in_kernel()) {
        return;
    }

    if (QSLIST_EMPTY(&kvm_devices_head)) {
        memory_listener_register(&devlistener, &address_space_memory);
        qemu_add_machine_init_done_notifier(&notify);
    }
    kd = g_new0(KVMDevice, 1);
    kd->mr = mr;
    kd->kda.id = devid;
    kd->kda.addr = -1;
    kd->kdattr.flags = 0;
    kd->kdattr.group = group;
    kd->kdattr.attr = attr;
    kd->dev_fd = dev_fd;
    kd->kda_addr_ormask = addr_ormask;
    QSLIST_INSERT_HEAD(&kvm_devices_head, kd, entries);
    memory_region_ref(kd->mr);
}
static int compare_u64(const void *a, const void *b)
{
    if (*(uint64_t *)a > *(uint64_t *)b) {
        return 1;
    }
    if (*(uint64_t *)a < *(uint64_t *)b) {
        return -1;
    }
    return 0;
}
/*
 * cpreg_values are sorted in ascending order by KVM register ID
 * (see kvm_arm_init_cpreg_list). This allows us to cheaply find
 * the storage for a KVM register by ID with a binary search.
 */
static uint64_t *kvm_arm_get_cpreg_ptr(ARMCPU *cpu, uint64_t regidx)
{
    uint64_t *res;

    res = bsearch(&regidx, cpu->cpreg_indexes, cpu->cpreg_array_len,
                  sizeof(uint64_t), compare_u64);
    assert(res);

    return &cpu->cpreg_values[res - cpu->cpreg_indexes];
}
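
/*
 * Illustrative use: kvm_arm_cpu_pre_save() below writes the cached
 * virtual counter through this lookup, i.e.
 *     *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT) = cpu->kvm_vtime;
 */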
/**
 * kvm_arm_reg_syncs_via_cpreg_list:
 * @regidx: KVM register index
 *
 * Return true if this KVM register should be synchronized via the
 * cpreg list of arbitrary system registers, false if it is synchronized
 * by hand using code in kvm_arch_get/put_registers().
 */
static bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM64_SVE:
        return false;
    default:
        return true;
    }
}
/**
 * kvm_arm_init_cpreg_list:
 * @cpu: ARMCPU
 *
 * Initialize the ARMCPU cpreg list according to the kernel's
 * definition of what CPU registers it knows about (and throw away
 * the previous TCG-created cpreg list).
 *
 * Returns: 0 if success, else < 0 error code
 */
static int kvm_arm_init_cpreg_list(ARMCPU *cpu)
{
    struct kvm_reg_list rl;
    struct kvm_reg_list *rlp;
    int i, ret, arraylen;
    CPUState *cs = CPU(cpu);

    rl.n = 0;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
    if (ret != -E2BIG) {
        return ret;
    }
    rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
    rlp->n = rl.n;
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
    if (ret) {
        goto out;
    }
    /* Sort the list we get back from the kernel, since cpreg_tuples
     * must be in strictly ascending order.
     */
    qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        if (!kvm_arm_reg_syncs_via_cpreg_list(rlp->reg[i])) {
            continue;
        }
        switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
        case KVM_REG_SIZE_U64:
            break;
        default:
            fprintf(stderr, "Can't handle size of register in kernel list\n");
            ret = -EINVAL;
            goto out;
        }

        arraylen++;
    }

    cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
    cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
    cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
                                         arraylen);
    cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
                                        arraylen);
    cpu->cpreg_array_len = arraylen;
    cpu->cpreg_vmstate_array_len = arraylen;

    for (i = 0, arraylen = 0; i < rlp->n; i++) {
        uint64_t regidx = rlp->reg[i];
        if (!kvm_arm_reg_syncs_via_cpreg_list(regidx)) {
            continue;
        }
        cpu->cpreg_indexes[arraylen] = regidx;
        arraylen++;
    }
    assert(cpu->cpreg_array_len == arraylen);

    if (!write_kvmstate_to_list(cpu)) {
        /* Shouldn't happen unless kernel is inconsistent about
         * what registers exist.
         */
        fprintf(stderr, "Initial read of kernel register state failed\n");
        ret = -EINVAL;
        goto out;
    }

out:
    g_free(rlp);
    return ret;
}
/**
 * kvm_arm_cpreg_level:
 * @regidx: KVM register index
 *
 * Return the level of this coprocessor/system register. Return value is
 * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE.
 */
static int kvm_arm_cpreg_level(uint64_t regidx)
{
    /*
     * All system registers are assumed to be level KVM_PUT_RUNTIME_STATE.
     * If a register should be written less often, you must add it here
     * with a state of either KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
     */
    switch (regidx) {
    case KVM_REG_ARM_TIMER_CNT:
    case KVM_REG_ARM_PTIMER_CNT:
        return KVM_PUT_FULL_STATE;
    }
    return KVM_PUT_RUNTIME_STATE;
}
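
/*
 * A plausible rationale (assumption, not stated in the original comment):
 * writing the CNT registers steps the guest's view of time, so they are
 * only pushed for KVM_PUT_FULL_STATE (reset/migration), never on a
 * routine runtime sync.
 */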
bool write_kvmstate_to_list(ARMCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            ret = kvm_get_one_reg(cs, regidx, &v32);
            if (!ret) {
                cpu->cpreg_values[i] = v32;
            }
            break;
        case KVM_REG_SIZE_U64:
            ret = kvm_get_one_reg(cs, regidx, cpu->cpreg_values + i);
            break;
        default:
            g_assert_not_reached();
        }
        if (ret) {
            ok = false;
        }
    }
    return ok;
}
bool write_list_to_kvmstate(ARMCPU *cpu, int level)
{
    CPUState *cs = CPU(cpu);
    int i;
    bool ok = true;

    for (i = 0; i < cpu->cpreg_array_len; i++) {
        uint64_t regidx = cpu->cpreg_indexes[i];
        uint32_t v32;
        int ret;

        if (kvm_arm_cpreg_level(regidx) > level) {
            continue;
        }

        switch (regidx & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
            v32 = cpu->cpreg_values[i];
            ret = kvm_set_one_reg(cs, regidx, &v32);
            break;
        case KVM_REG_SIZE_U64:
            ret = kvm_set_one_reg(cs, regidx, cpu->cpreg_values + i);
            break;
        default:
            g_assert_not_reached();
        }
        if (ret) {
            /* We might fail for "unknown register" and also for
             * "you tried to set a register which is constant with
             * a different value from what it actually contains".
             */
            ok = false;
        }
    }
    return ok;
}
void kvm_arm_cpu_pre_save(ARMCPU *cpu)
{
    /* KVM virtual time adjustment */
    if (cpu->kvm_vtime_dirty) {
        *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT) = cpu->kvm_vtime;
    }
}
void kvm_arm_cpu_post_load(ARMCPU *cpu)
{
    /* KVM virtual time adjustment */
    if (cpu->kvm_adjvtime) {
        cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT);
        cpu->kvm_vtime_dirty = true;
    }
}
void kvm_arm_reset_vcpu(ARMCPU *cpu)
{
    int ret;

    /* Re-init VCPU so that all registers are set to
     * their respective reset values.
     */
    ret = kvm_arm_vcpu_init(CPU(cpu));
    if (ret < 0) {
        fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret));
        abort();
    }
    if (!write_kvmstate_to_list(cpu)) {
        fprintf(stderr, "write_kvmstate_to_list failed\n");
        abort();
    }
    /*
     * Sync the reset values also into the CPUState. This is necessary
     * because the next thing we do will be a kvm_arch_put_registers()
     * which will update the list values from the CPUState before copying
     * the list values back to KVM. It's OK to ignore failure returns here
     * for the same reason we do so in kvm_arch_get_registers().
     */
    write_list_to_cpustate(cpu);
}
/*
 * Update KVM's MP_STATE based on what QEMU thinks it is
 */
static int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
{
    if (cap_has_mp_state) {
        struct kvm_mp_state mp_state = {
            .mp_state = (cpu->power_state == PSCI_OFF) ?
            KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE
        };
        return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    }
    return 0;
}
/*
 * Sync the KVM MP_STATE into QEMU
 */
static int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
{
    if (cap_has_mp_state) {
        struct kvm_mp_state mp_state;
        int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
        if (ret) {
            return ret;
        }
        cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ?
            PSCI_OFF : PSCI_ON;
    }
    return 0;
}
/*
 * kvm_arm_get_virtual_time:
 * @cs: CPUState
 *
 * Gets the VCPU's virtual counter and stores it in the KVM CPU state.
 */
static void kvm_arm_get_virtual_time(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    int ret;

    if (cpu->kvm_vtime_dirty) {
        return;
    }

    ret = kvm_get_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
    if (ret) {
        error_report("Failed to get KVM_REG_ARM_TIMER_CNT");
        abort();
    }

    cpu->kvm_vtime_dirty = true;
}
/*
 * kvm_arm_put_virtual_time:
 * @cs: CPUState
 *
 * Sets the VCPU's virtual counter to the value stored in the KVM CPU state.
 */
static void kvm_arm_put_virtual_time(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    int ret;

    if (!cpu->kvm_vtime_dirty) {
        return;
    }

    ret = kvm_set_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
    if (ret) {
        error_report("Failed to set KVM_REG_ARM_TIMER_CNT");
        abort();
    }

    cpu->kvm_vtime_dirty = false;
}
/*
 * kvm_put_vcpu_events:
 * @cpu: ARMCPU
 *
 * Put VCPU related state to kvm.
 *
 * Returns: 0 if success else < 0 error code
 */
static int kvm_put_vcpu_events(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    events.exception.serror_pending = env->serror.pending;

    /* Inject SError to guest with specified syndrome if host kernel
     * supports it, otherwise inject SError without syndrome.
     */
    if (cap_has_inject_serror_esr) {
        events.exception.serror_has_esr = env->serror.has_esr;
        events.exception.serror_esr = env->serror.esr;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
    if (ret) {
        error_report("failed to put vcpu events");
    }

    return ret;
}
/*
 * kvm_get_vcpu_events:
 * @cpu: ARMCPU
 *
 * Get VCPU related state from kvm.
 *
 * Returns: 0 if success else < 0 error code
 */
static int kvm_get_vcpu_events(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    struct kvm_vcpu_events events;
    int ret;

    if (!kvm_has_vcpu_events()) {
        return 0;
    }

    memset(&events, 0, sizeof(events));
    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
    if (ret) {
        error_report("failed to get vcpu events");
        return ret;
    }

    env->serror.pending = events.exception.serror_pending;
    env->serror.has_esr = events.exception.serror_has_esr;
    env->serror.esr = events.exception.serror_esr;

    return 0;
}
#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)

/*
 * ESR_EL1
 * ISS encoding
 * AARCH64: DFSC,   bits [5:0]
 * AARCH32:
 *      TTBCR.EAE == 0
 *          FS[4]   - DFSR[10]
 *          FS[3:0] - DFSR[3:0]
 *      TTBCR.EAE == 1
 *          FS, bits [5:0]
 */
#define ESR_DFSC(aarch64, lpae, v)        \
    ((aarch64 || (lpae)) ? ((v) & 0x3F)   \
               : (((v) >> 6) | ((v) & 0x1F)))

#define ESR_DFSC_EXTABT(aarch64, lpae) \
    ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)
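
/*
 * Worked example: for an AArch64 EL1 (or AArch32 LPAE) fault, ESR_DFSC()
 * extracts bits [5:0] directly, so a synchronous external abort reads
 * back as 0x10 == ESR_DFSC_EXTABT(true, *). For AArch32 with the
 * short-descriptor format the expected external-abort code is 0x8.
 */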
/*
 * kvm_arm_verify_ext_dabt_pending:
 * @cs: CPUState
 *
 * Verify the fault status code wrt the Ext DABT injection
 *
 * Returns: true if the fault status code is as expected, false otherwise
 */
static bool kvm_arm_verify_ext_dabt_pending(CPUState *cs)
{
    uint64_t dfsr_val;

    if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64);
        int lpae = 0;

        if (!aarch64_mode) {
            uint64_t ttbcr;

            if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) {
                lpae = arm_feature(env, ARM_FEATURE_LPAE)
                        && (ttbcr & TTBCR_EAE);
            }
        }
        /*
         * The verification here is based on the DFSC bits
         * of the ESR_EL1 reg only
         */
        return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) ==
                ESR_DFSC_EXTABT(aarch64_mode, lpae));
    }
    return false;
}
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (unlikely(env->ext_dabt_raised)) {
        /*
         * Verify that the ext DABT has been properly injected, otherwise
         * we risk indefinitely re-running the faulting instruction.
         * This covers a very narrow case for kernels 5.5..5.5.4
         * where the injected abort was misconfigured to be
         * an IMPLEMENTATION DEFINED exception (for 32-bit EL1).
         */
        if (!arm_feature(env, ARM_FEATURE_AARCH64) &&
            unlikely(!kvm_arm_verify_ext_dabt_pending(cs))) {

            error_report("Data abort exception with no valid ISS generated by "
                         "guest memory access. KVM unable to emulate faulting "
                         "instruction. Failed to inject an external data abort "
                         "into the guest.");
            abort();
        }
        /* Clear the status */
        env->ext_dabt_raised = 0;
    }
}
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    ARMCPU *cpu;
    uint32_t switched_level;

    if (kvm_irqchip_in_kernel()) {
        /*
         * We only need to sync timer states with user-space interrupt
         * controllers, so return early and save cycles if we don't.
         */
        return MEMTXATTRS_UNSPECIFIED;
    }

    cpu = ARM_CPU(cs);

    /* Synchronize our shadowed in-kernel device irq lines with the kvm ones */
    if (run->s.regs.device_irq_level != cpu->device_irq_level) {
        switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level;

        qemu_mutex_lock_iothread();

        if (switched_level & KVM_ARM_DEV_EL1_VTIMER) {
            qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT],
                         !!(run->s.regs.device_irq_level &
                            KVM_ARM_DEV_EL1_VTIMER));
            switched_level &= ~KVM_ARM_DEV_EL1_VTIMER;
        }

        if (switched_level & KVM_ARM_DEV_EL1_PTIMER) {
            qemu_set_irq(cpu->gt_timer_outputs[GTIMER_PHYS],
                         !!(run->s.regs.device_irq_level &
                            KVM_ARM_DEV_EL1_PTIMER));
            switched_level &= ~KVM_ARM_DEV_EL1_PTIMER;
        }

        if (switched_level & KVM_ARM_DEV_PMU) {
            qemu_set_irq(cpu->pmu_interrupt,
                         !!(run->s.regs.device_irq_level & KVM_ARM_DEV_PMU));
            switched_level &= ~KVM_ARM_DEV_PMU;
        }

        if (switched_level) {
            qemu_log_mask(LOG_UNIMP, "%s: unhandled in-kernel device IRQ %x\n",
                          __func__, switched_level);
        }

        /* We also mark unknown levels as processed to not waste cycles */
        cpu->device_irq_level = run->s.regs.device_irq_level;
        qemu_mutex_unlock_iothread();
    }

    return MEMTXATTRS_UNSPECIFIED;
}
static void kvm_arm_vm_state_change(void *opaque, bool running, RunState state)
{
    CPUState *cs = opaque;
    ARMCPU *cpu = ARM_CPU(cs);

    if (running) {
        if (cpu->kvm_adjvtime) {
            kvm_arm_put_virtual_time(cs);
        }
    } else {
        if (cpu->kvm_adjvtime) {
            kvm_arm_get_virtual_time(cs);
        }
    }
}
/*
 * kvm_arm_handle_dabt_nisv:
 * @cs: CPUState
 * @esr_iss: ISS encoding (limited) for the exception from Data Abort
 *           ISV bit set to '0b0' -> no valid instruction syndrome
 * @fault_ipa: faulting address for the synchronous data abort
 *
 * Returns: 0 if the exception has been handled, < 0 otherwise
 */
static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss,
                                    uint64_t fault_ipa)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    /*
     * Request KVM to inject the external data abort into the guest
     */
    if (cap_has_inject_ext_dabt) {
        struct kvm_vcpu_events events = { };
        /*
         * The external data abort event will be handled immediately by KVM
         * using the address fault that triggered the exit on given VCPU.
         * Requesting injection of the external data abort does not rely
         * on any other VCPU state. Therefore, in this particular case, the
         * VCPU synchronization can be exceptionally skipped.
         */
        events.exception.ext_dabt_pending = 1;
        /* KVM_CAP_ARM_INJECT_EXT_DABT implies KVM_CAP_VCPU_EVENTS */
        if (!kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events)) {
            env->ext_dabt_raised = 1;
            return 0;
        }
    } else {
        error_report("Data abort exception triggered by guest memory access "
                     "at physical address: 0x" TARGET_FMT_lx,
                     (target_ulong)fault_ipa);
        error_printf("KVM unable to emulate faulting instruction.\n");
    }
    return -1;
}
/*
 * kvm_arm_handle_debug:
 * @cs: CPUState
 * @debug_exit: debug part of the KVM exit structure
 *
 * Returns: TRUE if the debug exception was handled.
 *
 * See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU.
 */
static bool kvm_arm_handle_debug(CPUState *cs,
                                 struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
        break;
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    arm_cpu_do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_DEBUG:
        if (kvm_arm_handle_debug(cs, &run->debug.arch)) {
            ret = EXCP_DEBUG;
        } /* otherwise return to guest */
        break;
    case KVM_EXIT_ARM_NISV:
        /* External DABT with no valid iss to decode */
        ret = kvm_arm_handle_dabt_nisv(cs, run->arm_nisv.esr_iss,
                                       run->arm_nisv.fault_ipa);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                      __func__, run->exit_reason);
    }
    return ret;
}
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}
/*
 * kvm_arm_hw_debug_active:
 * @cs: CPUState
 *
 * Return: TRUE if any hardware breakpoints in use.
 */
static bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}
/*
 * kvm_arm_copy_hw_debug_data:
 * @ptr: kvm_guest_debug_arch structure
 *
 * Copy the architecture specific debug registers into the
 * kvm_guest_debug ioctl structure.
 */
static void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;

    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}
void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
    if (kvm_sw_breakpoints_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
    }
    if (kvm_arm_hw_debug_active(cs)) {
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW;
        kvm_arm_copy_hw_debug_data(&dbg->arch);
    }
}
void kvm_arch_init_irq_routing(KVMState *s)
{
}

int kvm_arch_irqchip_create(KVMState *s)
{
    if (kvm_kernel_irqchip_split()) {
        error_report("-machine kernel_irqchip=split is not supported on ARM.");
        exit(1);
    }

    /* If we can create the VGIC using the newer device control API, we
     * let the device do this when it initializes itself, otherwise we
     * fall back to the old API */
    return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
}
int kvm_arm_vgic_probe(void)
{
    int val = 0;

    if (kvm_create_device(kvm_state,
                          KVM_DEV_TYPE_ARM_VGIC_V3, true) == 0) {
        val |= KVM_ARM_VGIC_V3;
    }
    if (kvm_create_device(kvm_state,
                          KVM_DEV_TYPE_ARM_VGIC_V2, true) == 0) {
        val |= KVM_ARM_VGIC_V2;
    }
    return val;
}
int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level)
{
    int kvm_irq = (irqtype << KVM_ARM_IRQ_TYPE_SHIFT) | irq;
    int cpu_idx1 = cpu % 256;
    int cpu_idx2 = cpu / 256;

    kvm_irq |= (cpu_idx1 << KVM_ARM_IRQ_VCPU_SHIFT) |
               (cpu_idx2 << KVM_ARM_IRQ_VCPU2_SHIFT);

    return kvm_set_irq(kvm_state, kvm_irq, !!level);
}
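
/*
 * The resulting value follows the KVM_IRQ_LINE field layout documented
 * in the kernel API docs: bits [31:28] vcpu2_index, [27:24] irq_type,
 * [23:16] vcpu_index, [15:0] irq number. Splitting the vcpu index over
 * two fields is what allows addressing more than 256 vcpus.
 */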
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    AddressSpace *as = pci_device_iommu_address_space(dev);
    hwaddr xlat, len, doorbell_gpa;
    MemoryRegionSection mrs;
    MemoryRegion *mr;

    if (as == &address_space_memory) {
        return 0;
    }

    /* MSI doorbell address is translated by an IOMMU */

    RCU_READ_LOCK_GUARD();

    mr = address_space_translate(as, address, &xlat, &len, true,
                                 MEMTXATTRS_UNSPECIFIED);

    if (!mr) {
        return 1;
    }

    mrs = memory_region_find(mr, xlat, 1);

    if (!mrs.mr) {
        return 1;
    }

    doorbell_gpa = mrs.offset_within_address_space;
    memory_region_unref(mrs.mr);

    route->u.msi.address_lo = doorbell_gpa;
    route->u.msi.address_hi = doorbell_gpa >> 32;

    trace_kvm_arm_fixup_msi_route(address, doorbell_gpa);

    return 0;
}
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}
)
1601 return (data
- 32) & 0xffff;
bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}
static void kvm_arch_get_eager_split_size(Object *obj, Visitor *v,
                                          const char *name, void *opaque,
                                          Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint64_t value = s->kvm_eager_split_size;

    visit_type_size(v, name, &value, errp);
}
static void kvm_arch_set_eager_split_size(Object *obj, Visitor *v,
                                          const char *name, void *opaque,
                                          Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    uint64_t value;

    if (s->fd != -1) {
        error_setg(errp, "Unable to set early-split-size after KVM has been initialized");
        return;
    }

    if (!visit_type_size(v, name, &value, errp)) {
        return;
    }

    if (value && !is_power_of_2(value)) {
        error_setg(errp, "early-split-size must be a power of two");
        return;
    }

    s->kvm_eager_split_size = value;
}
void kvm_arch_accel_class_init(ObjectClass *oc)
{
    object_class_property_add(oc, "eager-split-size", "size",
                              kvm_arch_get_eager_split_size,
                              kvm_arch_set_eager_split_size, NULL, NULL);

    object_class_property_set_description(oc, "eager-split-size",
        "Eager Page Split chunk size for hugepages. (default: 0, disabled)");
}
int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}
int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}
void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}
static bool kvm_arm_set_device_attr(ARMCPU *cpu, struct kvm_device_attr *attr,
                                    const char *name)
{
    int err;

    err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    return true;
}
void kvm_arm_pmu_init(ARMCPU *cpu)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!cpu->has_pmu) {
        return;
    }
    if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) {
        error_report("failed to init PMU");
        abort();
    }
}
*cpu
, int irq
)
1730 struct kvm_device_attr attr
= {
1731 .group
= KVM_ARM_VCPU_PMU_V3_CTRL
,
1732 .addr
= (intptr_t)&irq
,
1733 .attr
= KVM_ARM_VCPU_PMU_V3_IRQ
,
1736 if (!cpu
->has_pmu
) {
1739 if (!kvm_arm_set_device_attr(cpu
, &attr
, "PMU")) {
1740 error_report("failed to set irq for PMU");
void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PVTIME_CTRL,
        .attr = KVM_ARM_VCPU_PVTIME_IPA,
        .addr = (uint64_t)&ipa,
    };

    if (cpu->kvm_steal_time == ON_OFF_AUTO_OFF) {
        return;
    }
    if (!kvm_arm_set_device_attr(cpu, &attr, "PVTIME IPA")) {
        error_report("failed to init PVTIME IPA");
        abort();
    }
}
void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
{
    bool has_steal_time = kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);

    if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
        if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
        } else {
            cpu->kvm_steal_time = ON_OFF_AUTO_ON;
        }
    } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
        if (!has_steal_time) {
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "on this host");
            return;
        } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            /*
             * DEN0057A chapter 2 says "This specification only covers
             * systems in which the Execution state of the hypervisor
             * as well as EL1 of virtual machines is AArch64.". And,
             * to ensure that, the smc/hvc calls are only specified as
             * smc64/hvc64.
             */
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "for AArch32 guests");
            return;
        }
    }
}
bool kvm_arm_aarch32_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
}

bool kvm_arm_sve_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}
QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
{
    /* Only call this function if kvm_arm_sve_supported() returns true. */
    static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
    static bool probed;
    uint32_t vq = 0;
    int i;

    /*
     * KVM ensures all host CPUs support the same set of vector lengths.
     * So we only need to create the scratch VCPUs once and then cache
     * the results.
     */
    if (!probed) {
        struct kvm_vcpu_init init = {
            .target = -1,
            .features[0] = (1 << KVM_ARM_VCPU_SVE),
        };
        struct kvm_one_reg reg = {
            .id = KVM_REG_ARM64_SVE_VLS,
            .addr = (uint64_t)&vls[0],
        };
        int fdarray[3], ret;

        probed = true;

        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
            error_report("failed to create scratch VCPU with SVE enabled");
            abort();
        }
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
        kvm_arm_destroy_scratch_host_vcpu(fdarray);
        if (ret) {
            error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
                         strerror(errno));
            abort();
        }

        for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
            if (vls[i]) {
                vq = 64 - clz64(vls[i]) + i * 64;
                break;
            }
        }
        if (vq > ARM_MAX_VQ) {
            warn_report("KVM supports vector lengths larger than "
                        "QEMU can enable");
            vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ);
        }
    }

    return vls[0];
}
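
/*
 * The returned value is the first word of the KVM_REG_ARM64_SVE_VLS
 * bitmap: bit n set means vector quadword count n + 1, i.e. a
 * (n + 1) * 128-bit vector length, is supported. So e.g. 0b11 means
 * 128- and 256-bit vectors are available.
 */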
static int kvm_arm_sve_set_vls(ARMCPU *cpu)
{
    uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map };

    assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);

    return kvm_set_one_reg(CPU(cpu), KVM_REG_ARM64_SVE_VLS, &vls[0]);
}
#define ARM_CPU_ID_MPIDR       3, 0, 0, 0, 5
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t psciver;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        error_report("KVM is not supported for this guest CPU type");
        return -EINVAL;
    }

    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cs->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = QEMU_PSCI_VERSION_0_2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        env->features &= ~(1ULL << ARM_FEATURE_PMU);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        assert(kvm_arm_sve_supported());
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                                      1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arm_sve_set_vls(cpu);
        if (ret) {
            return ret;
        }
        ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
        if (ret) {
            return ret;
        }
    }

    /*
     * KVM reports the exact PSCI version it is implementing via a
     * special sysreg. If it is present, use its contents to determine
     * what to report to the guest in the dtb (it is the PSCI version,
     * in the same 15-bits major 16-bits minor format that PSCI_VERSION
     * returns).
     */
    if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) {
        cpu->psci_version = psciver;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    return kvm_arm_init_cpreg_list(cpu);
}
int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}
/* Callers must hold the iothread mutex lock */
static void kvm_inject_arm_sea(CPUState *c)
{
    ARMCPU *cpu = ARM_CPU(c);
    CPUARMState *env = &cpu->env;
    uint32_t esr;
    bool same_el;

    c->exception_index = EXCP_DATA_ABORT;
    env->exception.target_el = 1;

    /*
     * Set the DFSC to synchronous external abort and set FnV to not valid,
     * this will tell guest the FAR_ELx is UNKNOWN for this abort.
     */
    same_el = arm_current_el(env) == env->exception.target_el;
    esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10);

    env->exception.syndrome = esr;

    arm_cpu_do_interrupt(c);
}
#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
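
/*
 * Example: AARCH64_CORE_REG(regs.pc) builds the ONE_REG id for the guest
 * PC; KVM_REG_ARM_CORE_REG() is the kernel's offsetof-based index into
 * struct kvm_regs.
 */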
static int kvm_arch_put_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#if HOST_BIG_ENDIAN
        uint64_t fp_val[2] = { q[1], q[0] };
        ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]),
                              fp_val);
#else
        ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
#endif
        if (ret) {
            return ret;
        }
    }

    return 0;
}
/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_put_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp[ARM_MAX_VQ * 2];
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
        ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
        if (ret) {
            return ret;
        }
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0],
                        DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
        ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
        if (ret) {
            return ret;
        }
    }

    r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
                    DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
    if (ret) {
        return ret;
    }

    return 0;
}
int kvm_arch_put_registers(CPUState *cs, int level)
{
    uint64_t val;
    uint32_t fpr;
    int i, ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
                              &env->xregs[i]);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
    if (ret) {
        return ret;
    }

    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
    if (ret) {
        return ret;
    }

    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
    if (ret) {
        return ret;
    }

    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
                              &env->banked_spsr[i + 1]);
        if (ret) {
            return ret;
        }
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_put_sve(cs);
    } else {
        ret = kvm_arch_put_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpsr(env);
    ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpcr(env);
    ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    /*
     * Setting VCPU events should be triggered after syncing the registers
     * to avoid overwriting potential changes made by KVM upon calling
     * KVM_SET_VCPU_EVENTS ioctl
     */
    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    return kvm_arm_sync_mpstate_to_kvm(cpu);
}
static int kvm_arch_get_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
        if (ret) {
            return ret;
        } else {
#if HOST_BIG_ENDIAN
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    return 0;
}
/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_get_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = &env->vfp.zregs[n].d[0];
        ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, cpu->sve_max_vq * 2);
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = &env->vfp.pregs[n].p[0];
        ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    }

    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
    ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
    if (ret) {
        return ret;
    }
    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));

    return 0;
}
int kvm_arch_get_registers(CPUState *cs)
{
    uint64_t val;
    unsigned int el;
    uint32_t fpr;
    int i, ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
                              &env->xregs[i]);
        if (ret) {
            return ret;
        }
    }

    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
    if (ret) {
        return ret;
    }

    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
    if (ret) {
        return ret;
    }

    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
                              &env->banked_spsr[i + 1]);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_get_sve(cs);
    } else {
        ret = kvm_arch_get_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    ret = kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}
void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    ram_addr_t ram_addr;
    hwaddr paddr;

    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if (acpi_ghes_present() && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            /*
             * If this is a BUS_MCEERR_AR, we know we have been called
             * synchronously from the vCPU thread, so we can easily
             * synchronize the state and inject an error.
             *
             * TODO: we currently don't tell the guest at all about
             * BUS_MCEERR_AO. In that case we might either be being
             * called synchronously from the vCPU thread, or a bit
             * later from the main thread, so doing the injection of
             * the error would be more complicated.
             */
            if (code == BUS_MCEERR_AR) {
                kvm_cpu_synchronize_state(c);
                if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
                    kvm_inject_arm_sea(c);
                } else {
                    error_report("failed to record the error");
                    abort();
                }
            }
            return;
        }
        if (code == BUS_MCEERR_AO) {
            error_report("Hardware memory error at addr %p for memory used by "
                "QEMU itself instead of guest system!", addr);
        }
    }

    if (code == BUS_MCEERR_AR) {
        error_report("Hardware memory error!");
        exit(1);
    }
}
/* C6.6.29 BRK instruction */
static const uint32_t brk_insn = 0xd4200000;

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
        brk != brk_insn ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
        return -EINVAL;
    }
    return 0;
}