/*
 * ARM implementation of KVM hooks, 64 bit specific code
 *
 * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
 * Copyright Alex Bennée 2014, Linaro
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/ptrace.h>

#include <linux/elf.h>
#include <linux/kvm.h>

#include "qapi/error.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/host-utils.h"
#include "qemu/main-loop.h"
#include "exec/gdbstub.h"
#include "sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "internals.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/ghes.h"
#include "hw/arm/virt.h"

static bool have_guest_debug;

void kvm_arm_init_debug(KVMState *s)
{
    have_guest_debug = kvm_check_extension(s,
                                           KVM_CAP_SET_GUEST_DEBUG);

    max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS);
    hw_watchpoints = g_array_sized_new(true, true,
                                       sizeof(HWWatchpoint), max_hw_wps);

    max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS);
    hw_breakpoints = g_array_sized_new(true, true,
                                       sizeof(HWBreakpoint), max_hw_bps);
}

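/*
 * A note on the GArray calls above: g_array_sized_new() takes
 * (zero_terminated, clear, element_size, reserved_size), so both arrays
 * start out zeroed and pre-sized to the limits the kernel just reported,
 * while their lengths (which back cur_hw_wps/cur_hw_bps) start at 0 until
 * entries are appended.
 */
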
int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return insert_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return insert_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        return delete_hw_breakpoint(addr);
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_ACCESS:
        return delete_hw_watchpoint(addr, len, type);
    default:
        return -ENOSYS;
    }
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    if (cur_hw_wps > 0) {
        g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
    }
    if (cur_hw_bps > 0) {
        g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
    }
}

void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
{
    int i;

    memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));

    for (i = 0; i < max_hw_wps; i++) {
        HWWatchpoint *wp = get_hw_wp(i);
        ptr->dbg_wcr[i] = wp->wcr;
        ptr->dbg_wvr[i] = wp->wvr;
    }
    for (i = 0; i < max_hw_bps; i++) {
        HWBreakpoint *bp = get_hw_bp(i);
        ptr->dbg_bcr[i] = bp->bcr;
        ptr->dbg_bvr[i] = bp->bvr;
    }
}

bool kvm_arm_hw_debug_active(CPUState *cs)
{
    return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}

static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr,
                                    const char *name)
{
    int err;

    err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
    if (err != 0) {
        error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));
        return false;
    }

    return true;
}

void kvm_arm_pmu_init(CPUState *cs)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .attr = KVM_ARM_VCPU_PMU_V3_INIT,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
        error_report("failed to init PMU");
        abort();
    }
}

void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PMU_V3_CTRL,
        .addr = (intptr_t)&irq,
        .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
    };

    if (!ARM_CPU(cs)->has_pmu) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
        error_report("failed to set irq for PMU");
        abort();
    }
}

void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa)
{
    struct kvm_device_attr attr = {
        .group = KVM_ARM_VCPU_PVTIME_CTRL,
        .attr = KVM_ARM_VCPU_PVTIME_IPA,
        .addr = (uint64_t)&ipa,
    };

    if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) {
        return;
    }
    if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) {
        error_report("failed to init PVTIME IPA");
        abort();
    }
}

static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
{
    uint64_t ret;
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
    int err;

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
    if (err < 0) {
        return -1;
    }
    *pret = ret;
    return 0;
}

static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
{
    struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };

    assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
    return ioctl(fd, KVM_GET_ONE_REG, &idreg);
}

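/*
 * ARM64_SYS_REG(op0, op1, crn, crm, op2) encodes a system register into
 * a KVM ONE_REG id. For example, ARM64_SYS_REG(3, 0, 0, 4, 0) used below
 * is ID_AA64PFR0_EL1 and ARM64_SYS_REG(3, 3, 9, 12, 0) is PMCR_EL0.
 * A minimal sketch of reading an ID register through the helper above,
 * with "vcpu_fd" standing in for a scratch vcpu file descriptor:
 *
 *     uint64_t pfr0;
 *     if (read_sys_reg64(vcpu_fd, &pfr0, ARM64_SYS_REG(3, 0, 0, 4, 0)) < 0) {
 *         // kernel does not expose this register
 *     }
 */
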
static bool kvm_arm_pauth_supported(void)
{
    return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) &&
            kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC));
}

bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    /* Identify the feature bits corresponding to the host CPU, and
     * fill out the ARMHostCPUClass fields accordingly. To do this
     * we have to create a scratch VM, create a single CPU inside it,
     * and then query that CPU for the relevant ID registers.
     */
    int fdarray[3];
    bool sve_supported;
    bool pmu_supported = false;
    uint64_t features = 0;
    int err;

    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
     * we know these will only support creating one kind of guest CPU,
     * which is its preferred CPU type. Fortunately these old kernels
     * support only a very limited number of CPUs.
     */
    static const uint32_t cpus_to_try[] = {
        KVM_ARM_TARGET_AEM_V8,
        KVM_ARM_TARGET_FOUNDATION_V8,
        KVM_ARM_TARGET_CORTEX_A57,
        QEMU_KVM_ARM_TARGET_NONE
    };
    /*
     * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
     */
    struct kvm_vcpu_init init = { .target = -1, };

    /*
     * Ask for SVE if supported, so that we can query ID_AA64ZFR0,
     * which is otherwise RAZ.
     */
    sve_supported = kvm_arm_sve_supported();
    if (sve_supported) {
        init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }

    /*
     * Ask for Pointer Authentication if supported, so that we get
     * the unsanitized field values for AA64ISAR1_EL1.
     */
    if (kvm_arm_pauth_supported()) {
        init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                             1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
    }

    if (kvm_arm_pmu_supported()) {
        init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
        pmu_supported = true;
    }

    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
        return false;
    }

    ahcf->target = init.target;
    ahcf->dtb_compatible = "arm,arm-v8";

    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
                         ARM64_SYS_REG(3, 0, 0, 4, 0));
    if (unlikely(err < 0)) {
        /*
         * Before v4.15, the kernel only exposed a limited number of system
         * registers, not including any of the interesting AArch64 ID regs.
         * For the most part we could leave these fields as zero with minimal
         * effect, since this does not affect the values seen by the guest.
         *
         * However, it could cause problems down the line for QEMU,
         * so provide a minimal v8.0 default.
         *
         * ??? Could read MIDR and use knowledge from cpu64.c.
         * ??? Could map a page of memory into our temp guest and
         *     run the tiniest of hand-crafted kernels to extract
         *     the values seen by the guest.
         * ??? Either of these sounds like too much effort just
         *     to work around running a modern host kernel.
         */
        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
        err = 0;
    } else {
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
                              ARM64_SYS_REG(3, 0, 0, 4, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
                              ARM64_SYS_REG(3, 0, 0, 4, 5));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
                              ARM64_SYS_REG(3, 0, 0, 5, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
                              ARM64_SYS_REG(3, 0, 0, 5, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
                              ARM64_SYS_REG(3, 0, 0, 6, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
                              ARM64_SYS_REG(3, 0, 0, 6, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2,
                              ARM64_SYS_REG(3, 0, 0, 6, 2));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 7, 0));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 7, 1));
        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 7, 2));

        /*
         * Note that if AArch32 support is not present in the host,
         * the AArch32 sysregs are present to be read, but will
         * return UNKNOWN values. This is neither better nor worse
         * than skipping the reads and leaving 0, as we must avoid
         * considering the values in every case.
         */
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
                              ARM64_SYS_REG(3, 0, 0, 1, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
                              ARM64_SYS_REG(3, 0, 0, 1, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
                              ARM64_SYS_REG(3, 0, 0, 1, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
                              ARM64_SYS_REG(3, 0, 0, 1, 7));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
                              ARM64_SYS_REG(3, 0, 0, 2, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
                              ARM64_SYS_REG(3, 0, 0, 2, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
                              ARM64_SYS_REG(3, 0, 0, 2, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
                              ARM64_SYS_REG(3, 0, 0, 2, 3));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
                              ARM64_SYS_REG(3, 0, 0, 2, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
                              ARM64_SYS_REG(3, 0, 0, 2, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
                              ARM64_SYS_REG(3, 0, 0, 2, 6));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
                              ARM64_SYS_REG(3, 0, 0, 2, 7));

        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
                              ARM64_SYS_REG(3, 0, 0, 3, 0));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 1));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 2));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
                              ARM64_SYS_REG(3, 0, 0, 3, 4));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1,
                              ARM64_SYS_REG(3, 0, 0, 3, 5));
        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5,
                              ARM64_SYS_REG(3, 0, 0, 3, 6));

        /*
         * DBGDIDR is a bit complicated because the kernel doesn't
         * provide an accessor for it in 64-bit mode, which is what this
         * scratch VM is in, and there's no architected "64-bit sysreg
         * which reads the same as the 32-bit register" the way there is
         * for other ID registers. Instead we synthesize a value from the
         * AArch64 ID_AA64DFR0, the same way the kernel code in
         * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
         * We only do this if the CPU supports AArch32 at EL1.
         */
        if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
            int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
            int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
            int ctx_cmps =
                FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
            int version = 6; /* ARMv8 debug architecture */
            bool has_el3 =
                !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
            uint32_t dbgdidr = 0;

            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
            dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
            dbgdidr |= (1 << 15); /* RES1 bit */
            ahcf->isar.dbgdidr = dbgdidr;
        }

        if (pmu_supported) {
            /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
            err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
                                  ARM64_SYS_REG(3, 3, 9, 12, 0));
        }

        if (sve_supported) {
            /*
             * There is a range of kernels between kernel commit 73433762fcae
             * and f81cb2c3ad41 which have a bug where the kernel doesn't
             * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has
             * enabled SVE support, which resulted in an error rather than RAZ.
             * So only read the register if we set KVM_ARM_VCPU_SVE above.
             */
            err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
                                  ARM64_SYS_REG(3, 0, 0, 4, 4));
        }
    }

    kvm_arm_destroy_scratch_host_vcpu(fdarray);

    if (err < 0) {
        return false;
    }

    /*
     * We can assume any KVM supporting CPU is at least a v8
     * with VFPv4+Neon; this in turn implies most of the other
     * feature bits.
     */
    features |= 1ULL << ARM_FEATURE_V8;
    features |= 1ULL << ARM_FEATURE_NEON;
    features |= 1ULL << ARM_FEATURE_AARCH64;
    features |= 1ULL << ARM_FEATURE_PMU;
    features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;

    ahcf->features = features;

    return true;
}

void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
{
    bool has_steal_time = kvm_arm_steal_time_supported();

    if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
        if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
        } else {
            cpu->kvm_steal_time = ON_OFF_AUTO_ON;
        }
    } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
        if (!has_steal_time) {
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "on this host");
            return;
        } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
            /*
             * DEN0057A chapter 2 says "This specification only covers
             * systems in which the Execution state of the hypervisor
             * as well as EL1 of virtual machines is AArch64.". And,
             * to ensure that, the smc/hvc calls are only specified as
             * smc64/hvc64.
             */
            error_setg(errp, "'kvm-steal-time' cannot be enabled "
                             "for AArch32 guests");
            return;
        }
    }
}

bool kvm_arm_aarch32_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
}

bool kvm_arm_sve_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}

bool kvm_arm_steal_time_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
}

QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);

uint32_t kvm_arm_sve_get_vls(CPUState *cs)
{
    /* Only call this function if kvm_arm_sve_supported() returns true. */
    static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
    static bool probed;
    uint32_t vq = 0;
    int i;

    /*
     * KVM ensures all host CPUs support the same set of vector lengths.
     * So we only need to create the scratch VCPUs once and then cache
     * the results.
     */
    if (!probed) {
        struct kvm_vcpu_init init = {
            .target = -1,
            .features[0] = (1 << KVM_ARM_VCPU_SVE),
        };
        struct kvm_one_reg reg = {
            .id = KVM_REG_ARM64_SVE_VLS,
            .addr = (uint64_t)&vls[0],
        };
        int fdarray[3], ret;

        probed = true;

        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
            error_report("failed to create scratch VCPU with SVE enabled");
            abort();
        }
        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
        kvm_arm_destroy_scratch_host_vcpu(fdarray);
        if (ret) {
            error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
                         strerror(errno));
            abort();
        }

        for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
            if (vls[i]) {
                vq = 64 - clz64(vls[i]) + i * 64;
                break;
            }
        }
        if (vq > ARM_MAX_VQ) {
            warn_report("KVM supports vector lengths larger than "
                        "QEMU can enable");
            vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ);
        }
    }

    return vls[0];
}

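/*
 * Worked example of the decode above: KVM_REG_ARM64_SVE_VLS is a bitmap
 * in which bit n of vls[n / 64] being set means a vector length of
 * (n + 1) * 128 bits is supported (i.e. vq == n + 1). If the host
 * returned vls[0] == 0xb (bits 0, 1 and 3), the supported lengths would
 * be 128, 256 and 512 bits, and 64 - clz64(0xb) evaluates to 4, the
 * maximum vq.
 */
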
static int kvm_arm_sve_set_vls(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map };

    assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);

    return kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_VLS, &vls[0]);
}

#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5

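/* (op0, op1, CRn, CRm, op2) == (3, 0, 0, 0, 5) is the encoding of MPIDR_EL1 */
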
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret;
    uint64_t mpidr;
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t psciver;

    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
        error_report("KVM is not supported for this guest CPU type");
        return -EINVAL;
    }

    qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);

    /* Determine init features for this CPU */
    memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
    if (cs->start_powered_off) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
    }
    if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
        cpu->psci_version = QEMU_PSCI_VERSION_0_2;
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
    }
    if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
    }
    if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
        cpu->has_pmu = false;
    }
    if (cpu->has_pmu) {
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
    } else {
        env->features &= ~(1ULL << ARM_FEATURE_PMU);
    }
    if (cpu_isar_feature(aa64_sve, cpu)) {
        assert(kvm_arm_sve_supported());
        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
    }
    if (cpu_isar_feature(aa64_pauth, cpu)) {
        cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                                      1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
    }

    /* Do KVM_ARM_VCPU_INIT ioctl */
    ret = kvm_arm_vcpu_init(cs);
    if (ret) {
        return ret;
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arm_sve_set_vls(cs);
        if (ret) {
            return ret;
        }
        ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
        if (ret) {
            return ret;
        }
    }

    /*
     * KVM reports the exact PSCI version it is implementing via a
     * special sysreg. If it is present, use its contents to determine
     * what to report to the guest in the dtb (it is the PSCI version,
     * in the same 15-bits major 16-bits minor format that PSCI_VERSION
     * uses).
     */
    if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) {
        cpu->psci_version = psciver;
    }

    /*
     * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
     * Currently KVM has its own idea about MPIDR assignment, so we
     * override our defaults with what we get from KVM.
     */
    ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
    if (ret) {
        return ret;
    }
    cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;

    /* Check whether user space can specify guest syndrome value */
    kvm_arm_init_serror_injection(cs);

    return kvm_arm_init_cpreg_list(cpu);
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
    /* Return true if the regidx is a register we should synchronize
     * via the cpreg_tuples array (ie is not a core or sve reg that
     * we sync by hand in kvm_arch_get/put_registers())
     */
    switch (regidx & KVM_REG_ARM_COPROC_MASK) {
    case KVM_REG_ARM_CORE:
    case KVM_REG_ARM64_SVE:
        return false;
    default:
        return true;
    }
}

typedef struct CPRegStateLevel {
    uint64_t regidx;
    int level;
} CPRegStateLevel;

/* All system registers not listed in the following table are assumed to be
 * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
 * often, you must add it to this table with a state of either
 * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
 */
static const CPRegStateLevel non_runtime_cpregs[] = {
    { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
    { KVM_REG_ARM_PTIMER_CNT, KVM_PUT_FULL_STATE },
};

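/*
 * The counter registers illustrate why the level matters: rewriting
 * KVM_REG_ARM_TIMER_CNT on every runtime sync would make the guest's
 * view of time jump around, so the counters are only written for
 * KVM_PUT_FULL_STATE transitions such as VM start and incoming
 * migration.
 */
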
int kvm_arm_cpreg_level(uint64_t regidx)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
        const CPRegStateLevel *l = &non_runtime_cpregs[i];
        if (l->regidx == regidx) {
            return l->level;
        }
    }

    return KVM_PUT_RUNTIME_STATE;
}

/* Callers must hold the iothread mutex lock */
static void kvm_inject_arm_sea(CPUState *c)
{
    ARMCPU *cpu = ARM_CPU(c);
    CPUARMState *env = &cpu->env;
    uint32_t esr;
    bool same_el;

    c->exception_index = EXCP_DATA_ABORT;
    env->exception.target_el = 1;

    /*
     * Set the DFSC to synchronous external abort and set FnV to not valid,
     * this will tell guest the FAR_ELx is UNKNOWN for this abort.
     */
    same_el = arm_current_el(env) == env->exception.target_el;
    esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10);

    env->exception.syndrome = esr;

    arm_cpu_do_interrupt(c);
}

#define AARCH64_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CORE_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

#define AARCH64_SIMD_CTRL_REG(x)   (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
                 KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

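/*
 * KVM_REG_ARM_CORE_REG(x) is defined by the kernel ABI as the offset of
 * field x within struct kvm_regs, counted in 32-bit words:
 *
 *     offsetof(struct kvm_regs, x) / sizeof(__u32)
 *
 * so AARCH64_CORE_REG(regs.pc), for example, names the 64-bit core
 * register that holds the guest PC.
 */
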
static int kvm_arch_put_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
#if HOST_BIG_ENDIAN
        uint64_t fp_val[2] = { q[1], q[0] };
        ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]),
                              fp_val);
#else
        ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
#endif
        if (ret) {
            return ret;
        }
    }

    return 0;
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_put_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t tmp[ARM_MAX_VQ * 2];
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
        ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
        if (ret) {
            return ret;
        }
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = sve_bswap64(tmp, &env->vfp.pregs[n].p[0],
                        DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
        ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
        if (ret) {
            return ret;
        }
    }

    r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
                    DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
    if (ret) {
        return ret;
    }

    return 0;
}

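/*
 * A note on the byte swapping above: the KVM ABI exposes SVE register
 * slices as a little-endian byte stream, so on a big-endian host each
 * 64-bit chunk must be swapped on its way to and from the kernel; on
 * little-endian hosts sve_bswap64() is expected to reduce to a plain
 * pass-through of the source pointer.
 */
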
int kvm_arch_put_registers(CPUState *cs, int level)
{
    uint64_t val;
    uint32_t fpr;
    int i, ret;
    unsigned int el;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
     * AArch64 registers before pushing them out to 64-bit KVM.
     */
    if (!is_a64(env)) {
        aarch64_sync_32_to_64(env);
    }

    for (i = 0; i < 31; i++) {
        ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
                              &env->xregs[i]);
        if (ret) {
            return ret;
        }
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_save_sp(env, 1);

    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
    if (ret) {
        return ret;
    }

    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
    if (ret) {
        return ret;
    }

    /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }
    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
    if (ret) {
        return ret;
    }

    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
    if (ret) {
        return ret;
    }

    ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
    if (ret) {
        return ret;
    }

    /* Saved Program State Registers
     *
     * Before we restore from the banked_spsr[] array we need to
     * ensure that any modifications to env->spsr are correctly
     * reflected in the banks.
     */
    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->banked_spsr[i] = env->spsr;
    }

    /* KVM 0-4 map to QEMU banks 1-5 */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
                              &env->banked_spsr[i + 1]);
        if (ret) {
            return ret;
        }
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_put_sve(cs);
    } else {
        ret = kvm_arch_put_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpsr(env);
    ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
    if (ret) {
        return ret;
    }

    fpr = vfp_get_fpcr(env);
    ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
    if (ret) {
        return ret;
    }

    write_cpustate_to_list(cpu, true);

    if (!write_list_to_kvmstate(cpu, level)) {
        return -EINVAL;
    }

    /*
     * Setting VCPU events should be triggered after syncing the registers
     * to avoid overwriting potential changes made by KVM upon calling
     * KVM_SET_VCPU_EVENTS ioctl
     */
    ret = kvm_put_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    kvm_arm_sync_mpstate_to_kvm(cpu);

    return ret;
}

static int kvm_arch_get_fpsimd(CPUState *cs)
{
    CPUARMState *env = &ARM_CPU(cs)->env;
    int i, ret;

    for (i = 0; i < 32; i++) {
        uint64_t *q = aa64_vfp_qreg(env, i);
        ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
        if (ret) {
            return ret;
        } else {
#if HOST_BIG_ENDIAN
            uint64_t t;
            t = q[0], q[0] = q[1], q[1] = t;
#endif
        }
    }

    return 0;
}

/*
 * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
 * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
 * code the slice index to zero for now as it's unlikely we'll need more than
 * one slice for quite some time.
 */
static int kvm_arch_get_sve(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    uint64_t *r;
    int n, ret;

    for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
        r = &env->vfp.zregs[n].d[0];
        ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, cpu->sve_max_vq * 2);
    }

    for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
        r = &env->vfp.pregs[n].p[0];
        ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
        if (ret) {
            return ret;
        }
        sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
    }

    r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
    ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
    if (ret) {
        return ret;
    }
    sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));

    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    uint64_t val;
    unsigned int el;
    uint32_t fpr;
    int i, ret;

    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    for (i = 0; i < 31; i++) {
        ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
                              &env->xregs[i]);
        if (ret) {
            return ret;
        }
    }

    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
    if (ret) {
        return ret;
    }

    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
    if (ret) {
        return ret;
    }

    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
    if (ret) {
        return ret;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);
    if (is_a64(env)) {
        pstate_write(env, val);
    } else {
        cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    }

    /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
     * QEMU side we keep the current SP in xregs[31] as well.
     */
    aarch64_restore_sp(env, 1);

    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
    if (ret) {
        return ret;
    }

    /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
     * incoming AArch64 regs received from 64-bit KVM.
     * We must perform this after all of the registers have been acquired from
     * the kernel.
     */
    if (!is_a64(env)) {
        aarch64_sync_64_to_32(env);
    }

    ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
    if (ret) {
        return ret;
    }

    /* Fetch the SPSR registers
     *
     * KVM SPSRs 0-4 map to QEMU banks 1-5
     */
    for (i = 0; i < KVM_NR_SPSR; i++) {
        ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
                              &env->banked_spsr[i + 1]);
        if (ret) {
            return ret;
        }
    }

    el = arm_current_el(env);
    if (el > 0 && !is_a64(env)) {
        i = bank_number(env->uncached_cpsr & CPSR_M);
        env->spsr = env->banked_spsr[i];
    }

    if (cpu_isar_feature(aa64_sve, cpu)) {
        ret = kvm_arch_get_sve(cs);
    } else {
        ret = kvm_arch_get_fpsimd(cs);
    }
    if (ret) {
        return ret;
    }

    ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
    if (ret) {
        return ret;
    }
    vfp_set_fpsr(env, fpr);

    ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
    if (ret) {
        return ret;
    }
    vfp_set_fpcr(env, fpr);

    ret = kvm_get_vcpu_events(cpu);
    if (ret) {
        return ret;
    }

    if (!write_kvmstate_to_list(cpu)) {
        return -EINVAL;
    }
    /* Note that it's OK to have registers which aren't in CPUState,
     * so we can ignore a failure return here.
     */
    write_list_to_cpustate(cpu);

    kvm_arm_sync_mpstate_to_qemu(cpu);

    /* TODO: other registers */
    return ret;
}

void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
{
    ram_addr_t ram_addr;
    hwaddr paddr;

    assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);

    if (acpi_ghes_present() && addr) {
        ram_addr = qemu_ram_addr_from_host(addr);
        if (ram_addr != RAM_ADDR_INVALID &&
            kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
            kvm_hwpoison_page_add(ram_addr);
            /*
             * If this is a BUS_MCEERR_AR, we know we have been called
             * synchronously from the vCPU thread, so we can easily
             * synchronize the state and inject an error.
             *
             * TODO: we currently don't tell the guest at all about
             * BUS_MCEERR_AO. In that case we might either be being
             * called synchronously from the vCPU thread, or a bit
             * later from the main thread, so doing the injection of
             * the error would be more complicated.
             */
            if (code == BUS_MCEERR_AR) {
                kvm_cpu_synchronize_state(c);
                if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
                    kvm_inject_arm_sea(c);
                } else {
                    error_report("failed to record the error");
                    abort();
                }
            }
            return;
        }
        if (code == BUS_MCEERR_AO) {
            error_report("Hardware memory error at addr %p for memory used by "
                "QEMU itself instead of guest system!", addr);
        }
    }

    if (code == BUS_MCEERR_AR) {
        error_report("Hardware memory error!");
        exit(1);
    }
}

/* C6.6.29 BRK instruction */
static const uint32_t brk_insn = 0xd4200000;

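/*
 * The A64 BRK encoding carries a 16-bit immediate in bits [20:5], so
 * 0xd4200000 is BRK #0 and, for instance, BRK #1 would encode as
 * 0xd4200020.
 */
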
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    static uint32_t brk;

    if (have_guest_debug) {
        if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
            brk != brk_insn ||
            cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
            return -EINVAL;
        }
        return 0;
    } else {
        error_report("guest debug not supported on this kernel");
        return -EINVAL;
    }
}

/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
 *
 * To minimise translating between kernel and user-space the kernel
 * ABI just provides user-space with the full exception syndrome
 * register value to be decoded in QEMU.
 */

bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
{
    int hsr_ec = syn_get_ec(debug_exit->hsr);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* Ensure PC is synchronised */
    kvm_cpu_synchronize_state(cs);

    switch (hsr_ec) {
    case EC_SOFTWARESTEP:
        if (cs->singlestep_enabled) {
            return true;
        } else {
            /*
             * The kernel should have suppressed the guest's ability to
             * single step at this point so something has gone wrong.
             */
            error_report("%s: guest single-step while debugging unsupported"
                         " (%"PRIx64", %"PRIx32")",
                         __func__, env->pc, debug_exit->hsr);
            return false;
        }
    case EC_AA64_BKPT:
        if (kvm_find_sw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_BREAKPOINT:
        if (find_hw_breakpoint(cs, env->pc)) {
            return true;
        }
        break;
    case EC_WATCHPOINT:
    {
        CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
        if (wp) {
            cs->watchpoint_hit = wp;
            return true;
        }
        break;
    }
    default:
        error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
                     __func__, debug_exit->hsr, env->pc);
    }

    /* If we are not handling the debug exception it must belong to
     * the guest. Let's re-use the existing TCG interrupt code to set
     * everything up properly.
     */
    cs->exception_index = EXCP_BKPT;
    env->exception.syndrome = debug_exit->hsr;
    env->exception.vaddress = debug_exit->far;
    env->exception.target_el = 1;
    qemu_mutex_lock_iothread();
    arm_cpu_do_interrupt(cs);
    qemu_mutex_unlock_iothread();

    return false;
}

#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)

/*
 * ESR_EL1
 * ISS encoding
 * AARCH64: DFSC,   bits [5:0]
 * AARCH32:
 *      TTBCR.EAE == 0
 *          FS[4]   - DFSR[10]
 *          FS[3:0] - DFSR[3:0]
 *      TTBCR.EAE == 1
 *          FS, bits [5:0]
 */
#define ESR_DFSC(aarch64, lpae, v)        \
    ((aarch64 || (lpae)) ? ((v) & 0x3F)   \
               : (((v) >> 6) | ((v) & 0x1F)))

#define ESR_DFSC_EXTABT(aarch64, lpae) \
    ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)

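/*
 * Worked example for the short-descriptor case (aarch64 == 0,
 * lpae == 0): a synchronous external abort is reported in DFSR with
 * FS[4] in DFSR[10] and FS[3:0] in DFSR[3:0], so dfsr_val == 0x8 gives
 *
 *     ESR_DFSC(0, 0, 0x8) == ((0x8 >> 6) | (0x8 & 0x1F)) == 0x8
 *
 * which matches ESR_DFSC_EXTABT(0, 0) == 0x8. In the AArch64 and LPAE
 * formats the DFSC/FS field is used directly and the external abort
 * code is 0x10.
 */
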
bool kvm_arm_verify_ext_dabt_pending(CPUState *cs)
{
    uint64_t dfsr_val;

    if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64);
        int lpae = 0;

        if (!aarch64_mode) {
            uint64_t ttbcr;

            if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) {
                lpae = arm_feature(env, ARM_FEATURE_LPAE)
                       && (ttbcr & TTBCR_EAE);
            }
        }
        /*
         * The verification here is based on the DFSC bits
         * of the ESR_EL1 reg only
         */
        return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) ==
                ESR_DFSC_EXTABT(aarch64_mode, lpae));
    }
    return false;
}