// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, csr_exit_user),
	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, exits)
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
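/*
 * Illustrative sketch (not part of this file): userspace consumes these
 * stats through KVM's binary stats interface; the offsets published in
 * the header above locate each block in the stats file. vcpu_fd is an
 * assumed open VCPU descriptor and error handling is omitted:
 *
 *	int sfd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
 *	struct kvm_stats_header hdr;
 *	pread(sfd, &hdr, sizeof(hdr), 0);
 *	// descriptors start at hdr.desc_offset, values at hdr.data_offset
 */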
#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
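/*
 * For example, KVM_ISA_EXT_ARR(SSTC) expands to
 *
 *	[KVM_RISCV_ISA_EXT_SSTC] = RISCV_ISA_EXT_SSTC
 *
 * i.e. the KVM-visible extension ID indexes the matching host extension ID.
 */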
/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,

	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
};
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}
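/*
 * For example, kvm_riscv_vcpu_base2isa_ext(RISCV_ISA_EXT_m) returns
 * KVM_RISCV_ISA_EXT_M, while a host extension ID with no KVM mapping
 * returns KVM_RISCV_ISA_EXT_MAX.
 */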
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	default:
		break;
	}

	return true;
}
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSAIA:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZBB:
		return false;
	default:
		break;
	}

	return true;
}
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
	bool loaded;

	/**
	 * The preemption should be disabled here because it races with
	 * kvm_sched_out/kvm_sched_in (called from preempt notifiers) which
	 * also calls vcpu_load/put.
	 */
	get_cpu();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	vcpu->arch.last_exit_cpu = -1;

	memcpy(csr, reset_csr, sizeof(*csr));

	memcpy(cntx, reset_cntx, sizeof(*cntx));

	kvm_riscv_vcpu_fp_reset(vcpu);

	kvm_riscv_vcpu_timer_reset(vcpu);

	kvm_riscv_vcpu_aia_reset(vcpu);

	bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

	kvm_riscv_vcpu_pmu_reset(vcpu);

	vcpu->arch.hfence_head = 0;
	vcpu->arch.hfence_tail = 0;
	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

	/* Reset the guest CSRs for hotplug usecase */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	put_cpu();
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int rc;
	struct kvm_cpu_context *cntx;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	unsigned long host_isa, i;

	/* Mark this VCPU never ran */
	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

	/* Setup ISA features available to VCPU */
	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}

	/* Setup vendor, arch, and implementation details */
	vcpu->arch.mvendorid = sbi_get_mvendorid();
	vcpu->arch.marchid = sbi_get_marchid();
	vcpu->arch.mimpid = sbi_get_mimpid();

	/* Setup VCPU hfence queue */
	spin_lock_init(&vcpu->arch.hfence_lock);

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;
	cntx->hstatus = 0;
	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;

	/* By default, make CY, TM, and IR counters accessible in VU mode */
	reset_csr->scounteren = 0x7;

	/* Setup VCPU timer */
	kvm_riscv_vcpu_timer_init(vcpu);

	/* setup performance monitoring */
	kvm_riscv_vcpu_pmu_init(vcpu);

	/* Setup VCPU AIA */
	rc = kvm_riscv_vcpu_aia_init(vcpu);
	if (rc)
		return rc;

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu);

	return 0;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/**
	 * vcpu with id 0 is the designated boot cpu.
	 * Keep all vcpus with non-zero id in power-off state so that
	 * they can be brought up using SBI HSM extension.
	 */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Cleanup VCPU AIA context */
	kvm_riscv_vcpu_aia_deinit(vcpu);

	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	kvm_riscv_vcpu_pmu_deinit(vcpu);

	/* Free unused pages pre-allocated for G-stage page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_timer_pending(vcpu);
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	kvm_riscv_aia_wakeon_hgei(vcpu, true);
}
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	kvm_riscv_aia_wakeon_hgei(vcpu, false);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -EINVAL;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -EINVAL;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
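/*
 * Illustrative only: a userspace caller builds reg->id by OR-ing the
 * architecture, size, type, and register number together, e.g. to read
 * the base ISA word on rv64 (vcpu_fd assumed valid, error handling
 * omitted):
 *
 *	uint64_t isa;
 *	struct kvm_one_reg one_reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
 *		.addr = (unsigned long)&isa,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */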
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		return -EOPNOTSUPP;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		return -EOPNOTSUPP;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -EINVAL;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -EINVAL;

	return 0;
}
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}
static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
						 unsigned long reg_num,
						 unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (rc)
		return rc;

	return 0;
}
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val = 0;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		reg_val = 1; /* Mark the given extension as available */

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -EOPNOTSUPP;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}
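/*
 * Illustrative only: userspace can probe or toggle one extension through
 * the ISA_EXT pseudo-registers, e.g. disabling SVPBMT before the VCPU
 * first runs (vcpu_fd assumed valid, error handling omitted):
 *
 *	uint64_t val = 0;
 *	struct kvm_one_reg one_reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
 *		.addr = (unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &one_reg);
 */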
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	default:
		break;
	}

	return -EINVAL;
}
static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	default:
		break;
	}

	return -EINVAL;
}
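/*
 * Both dispatchers above are reached from the KVM_GET_ONE_REG and
 * KVM_SET_ONE_REG ioctls handled in kvm_arch_vcpu_ioctl() below; the
 * KVM_REG_RISCV_TYPE_MASK bits of reg->id select the register class.
 */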
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}
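/*
 * Illustrative only: userspace asserts or clears the VS-level external
 * interrupt line like so (vcpu_fd assumed valid):
 *
 *	struct kvm_interrupt irq = { .irq = KVM_INTERRUPT_SET };
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// KVM_INTERRUPT_UNSET to clear
 */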
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
	default:
		break;
	}

	return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}

	/* Flush AIA high interrupts */
	kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
}
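/*
 * irqs_pending and irqs_pending_mask work as a pair: writers first update
 * the pending bit and then set the same bit in the mask, so a non-zero
 * mask tells kvm_riscv_vcpu_flush_interrupts() exactly which HVIP bits
 * must be recomputed before the next guest entry.
 */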
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = csr_read(CSR_VSIE);

	/* Sync-up HVIP.VSSIP bit changes done by the Guest */
	hvip = csr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, v->irqs_pending);
		}
	}

	/* Sync-up AIA high interrupts */
	kvm_riscv_vcpu_aia_sync_interrupts(vcpu);

	/* Sync-up timer CSRs */
	kvm_riscv_vcpu_timer_sync(vcpu);
}
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	/*
	 * We only allow VS-mode software, timer, and external
	 * interrupts when irq is one of the local interrupts
	 * defined by RISC-V privilege specification.
	 */
	if (irq < IRQ_LOCAL_MAX &&
	    irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	set_bit(irq, vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	/*
	 * We only allow VS-mode software, timer, and external
	 * interrupts when irq is one of the local interrupts
	 * defined by RISC-V privilege specification.
	 */
	if (irq < IRQ_LOCAL_MAX &&
	    irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	clear_bit(irq, vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, vcpu->arch.irqs_pending_mask);

	return 0;
}
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	unsigned long ie;

	ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
		<< VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
	ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
		(unsigned long)mask;
	if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
		return true;

	/* Check AIA high interrupts */
	return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		kvm_riscv_vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
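/*
 * Illustrative only: userspace drives these handlers with the MP_STATE
 * ioctls (vcpu_fd assumed valid):
 *
 *	struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_RUNNABLE };
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
 */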
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	/* TODO: To be implemented later. */
	return -EINVAL;
}
static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
{
	u64 henvcfg = 0;

	if (riscv_isa_extension_available(isa, SVPBMT))
		henvcfg |= ENVCFG_PBMTE;

	if (riscv_isa_extension_available(isa, SSTC))
		henvcfg |= ENVCFG_STCE;

	if (riscv_isa_extension_available(isa, ZICBOM))
		henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

	if (riscv_isa_extension_available(isa, ZICBOZ))
		henvcfg |= ENVCFG_CBZE;

	csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
	csr_write(CSR_HENVCFGH, henvcfg >> 32);
#endif
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);

	kvm_riscv_vcpu_update_config(vcpu->arch.isa);

	kvm_riscv_gstage_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);

	kvm_riscv_vcpu_aia_load(vcpu, cpu);

	vcpu->cpu = cpu;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	kvm_riscv_vcpu_aia_put(vcpu);

	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

	kvm_riscv_vcpu_timer_save(vcpu);

	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
}
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			rcuwait_wait_event(wait,
				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
				TASK_INTERRUPTIBLE);
			kvm_vcpu_srcu_read_lock(vcpu);

			if (vcpu->arch.power_off || vcpu->arch.pause) {
				/*
				 * Awaken to handle a signal, request to
				 * sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_gstage_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
			kvm_riscv_fence_i_process(vcpu);

		/*
		 * The generic KVM_REQ_TLB_FLUSH is the same as
		 * KVM_REQ_HFENCE_GVMA_VMID_ALL
		 */
		if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
			kvm_riscv_hfence_gvma_vmid_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
			kvm_riscv_hfence_vvma_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
			kvm_riscv_hfence_process(vcpu);
	}
}
static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_HVIP, csr->hvip);
	kvm_riscv_vcpu_aia_update_hvip(vcpu);
}
/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	guest_state_enter_irqoff();
	__kvm_riscv_switch_to(&vcpu->arch);
	vcpu->arch.last_exit_cpu = vcpu->cpu;
	guest_state_exit_irqoff();
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;

	kvm_vcpu_srcu_read_lock(vcpu);

	switch (run->exit_reason) {
	case KVM_EXIT_MMIO:
		/* Process MMIO value returned from user-space */
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_SBI:
		/* Process SBI value returned from user-space */
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_CSR:
		/* Process CSR value returned from user-space */
		ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return ret;
	}

	if (run->immediate_exit) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (ret)
			continue;
		ret = 1;

		kvm_riscv_gstage_vmid_update(vcpu);

		kvm_riscv_check_vcpu_requests(vcpu);

		preempt_disable();

		/* Update AIA HW state before entering guest */
		ret = kvm_riscv_vcpu_aia_update(vcpu);
		if (ret <= 0) {
			preempt_enable();
			continue;
		}

		local_irq_disable();

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;

		kvm_vcpu_srcu_read_unlock(vcpu);
		smp_mb__after_srcu_read_unlock();

		/*
		 * We might have got VCPU interrupts updated asynchronously
		 * so update it in HW.
		 */
		kvm_riscv_vcpu_flush_interrupts(vcpu);

		/* Update HVIP CSR for current CPU */
		kvm_riscv_update_hvip(vcpu);

		if (ret <= 0 ||
		    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu) ||
		    xfer_to_guest_mode_work_pending()) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			preempt_enable();
			kvm_vcpu_srcu_read_lock(vcpu);
			continue;
		}

		/*
		 * Cleanup stale TLB entries
		 *
		 * Note: This should be done after G-stage VMID has been
		 * updated using kvm_riscv_gstage_vmid_ver_changed()
		 */
		kvm_riscv_local_tlb_sanitize(vcpu);

		guest_timing_enter_irqoff();

		kvm_riscv_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/*
		 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
		 * get an interrupt between __kvm_riscv_switch_to() and
		 * local_irq_enable() which can potentially change CSRs.
		 */
		trap.sepc = vcpu->arch.guest_context.sepc;
		trap.scause = csr_read(CSR_SCAUSE);
		trap.stval = csr_read(CSR_STVAL);
		trap.htval = csr_read(CSR_HTVAL);
		trap.htinst = csr_read(CSR_HTINST);

		/* Sync up the interrupt state with HW */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * There's no barrier which ensures that pending interrupts are
		 * recognised, so we just hope that the CPU takes any pending
		 * interrupts between the enable and disable.
		 */
		local_irq_enable();
		local_irq_disable();

		guest_timing_exit_irqoff();

		local_irq_enable();

		preempt_enable();

		kvm_vcpu_srcu_read_lock(vcpu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	kvm_vcpu_srcu_read_unlock(vcpu);

	return ret;
}