// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_host.h>
#include <asm/sigcontext.h>

#include "trace.h"

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	VCPU_STAT(exits),
	{ NULL }
};

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
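
/*
 * Worked example (editor's illustration, not from the original source):
 * KVM_REG_ARM_CORE_REG(name) expands to offsetof(struct kvm_regs, name) /
 * sizeof(__u32), so for X2 the id
 *   KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *   KVM_REG_ARM_CORE_REG(regs.regs[2])
 * decodes, after the masking above, to the index of regs.regs[2] counted
 * in 32-bit words.
 */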

static int validate_core_offset(const struct kvm_vcpu *vcpu,
				const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (KVM_REG_SIZE(reg->id) != size ||
	    !IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return 0;
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
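
/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * reading the guest PC through the interface above. Assumes an open vCPU fd
 * obtained via KVM_CREATE_VCPU; error handling elided.
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int read_guest_pc(int vcpu_fd, __u64 *pc)
{
	struct kvm_one_reg one_reg = {
		.id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
		      KVM_REG_ARM_CORE_REG(regs.pc),
		.addr = (__u64)(unsigned long)pc,	/* user buffer */
	};

	/* Dispatches to get_core_reg() via kvm_arm_get_reg() */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
}
#endif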

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
	return err;
}

#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
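
/*
 * Example (editor's illustration): the vqs bitmap packs vector lengths as
 * 64-bit words, with vq == SVE_VQ_MIN at bit 0 of word 0. A 512-bit vector
 * length (vq == SVE_VQ_MIN + 3) therefore lives at bit 3 of vqs[0].
 */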

static bool vq_present(
	const u64 (*const vqs)[KVM_ARM64_SVE_VLS_WORDS],
	unsigned int vq)
{
	return (*vqs)[vq_word(vq)] & vq_mask(vq);
}

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM; /* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(&vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(&vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}
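
/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * the vector length set must be written before SVE is finalized; afterwards
 * set_sve_vls() above fails with -EPERM. Error handling elided.
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

static void configure_sve(int vcpu_fd, __u64 vqs[KVM_ARM64_SVE_VLS_WORDS])
{
	struct kvm_one_reg one_reg = {
		.id = KVM_REG_ARM64_SVE_VLS,
		.addr = (__u64)(unsigned long)vqs,
	};
	int feature = KVM_ARM_VCPU_SVE;

	ioctl(vcpu_fd, KVM_SET_ONE_REG, &one_reg);	/* set_sve_vls() */
	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
}
#endif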

#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
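
/*
 * Example (editor's illustration): per the masks above, an SVE reg ID
 * carries the slice index in bits [4:0] and the register number in bits
 * [9:5], so KVM_REG_ARM64_SVE_ZREG(2, 0) means Z2, slice 0. Only slice 0
 * is currently accepted by sve_reg_to_region() below.
 */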

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen; /* User-requested offset and length */
	unsigned int maxlen; /* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
							SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}
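
/*
 * Example (editor's illustration, sizes assumed from the SVE UAPI): for a
 * vcpu whose maximum vector length is 512 bits (vq == 4), a Z-register
 * occupies SVE_SIG_ZREG_SIZE(4) == 64 bytes of sve_state, while the
 * user-visible register is the fixed KVM_SVE_ZREG_SIZE (2048 bits == 256
 * bytes). sve_reg_to_region() then yields klen == 64 and upad == 192:
 * reads zero-fill the trailing bytes and writes ignore them.
 */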

static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;
	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		/*
		 * The KVM_REG_ARM64_SVE regs must be used instead of
		 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
		 * SVE-enabled vcpus:
		 */
		if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
			continue;

		if (uindices) {
			if (put_user(core_reg | i, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}

static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}
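
/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * the usual two-call pattern for KVM_GET_REG_LIST, which reports the indices
 * enumerated above. The first call, with n == 0, fails with E2BIG but
 * updates n to the required count.
 */
#if 0
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static struct kvm_reg_list *fetch_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;

	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	/* sets probe.n */

	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
	if (!list)
		return NULL;

	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		free(list);
		return NULL;
	}

	return list;
}
#endif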

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	return 0;
}

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
			    KVM_GUESTDBG_USE_SW_BP | \
			    KVM_GUESTDBG_USE_HW | \
			    KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @kvm:	pointer to the KVM struct
 * @kvm_guest_debug: the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}
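
/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * enabling single-step through the ioctl above. Error handling elided.
 */
#if 0
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_single_step(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
#endif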

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);