// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, csr_exit_user),
	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
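/*
 * Bits 0-25 of the guest ISA bitmap correspond to the single-letter base
 * extensions 'a'-'z'; only these bits are exposed through the CONFIG(isa)
 * ONE_REG register below.
 */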
#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,

	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
};
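/*
 * Map a single-letter base ISA extension number (its bit position in the
 * host ISA bitmap) to the corresponding KVM_RISCV_ISA_EXT_* ID, or return
 * KVM_RISCV_ISA_EXT_MAX if there is no mapping.
 */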
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}
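/* Return true if userspace is allowed to enable the given extension for a VCPU. */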
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	default:
		break;
	}

	return true;
}
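/* Return true if userspace is allowed to disable the given extension for a VCPU. */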
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSAIA:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZBB:
		return false;
	default:
		break;
	}

	return true;
}
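/*
 * Reset a VCPU back to its reset context: guest CSRs, GPRs, FP state, timer,
 * AIA state and pending interrupts. Used at VCPU creation and when a
 * KVM_REQ_VCPU_RESET request is pending (e.g. SBI HSM hart start).
 */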
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
	bool loaded;

	/*
	 * Preemption must be disabled here because this races with
	 * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
	 * which also call kvm_arch_vcpu_put()/kvm_arch_vcpu_load().
	 */
	get_cpu();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	vcpu->arch.last_exit_cpu = -1;

	memcpy(csr, reset_csr, sizeof(*csr));

	memcpy(cntx, reset_cntx, sizeof(*cntx));

	kvm_riscv_vcpu_fp_reset(vcpu);

	kvm_riscv_vcpu_timer_reset(vcpu);

	kvm_riscv_vcpu_aia_reset(vcpu);

	bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

	kvm_riscv_vcpu_pmu_reset(vcpu);

	vcpu->arch.hfence_head = 0;
	vcpu->arch.hfence_tail = 0;
	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

	/* Reset the guest CSRs for hotplug usecase */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	put_cpu();
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int rc;
	struct kvm_cpu_context *cntx;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	unsigned long host_isa, i;

	/* Mark this VCPU never ran */
	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

	/* Setup ISA features available to VCPU */
	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}

	/* Setup vendor, arch, and implementation details */
	vcpu->arch.mvendorid = sbi_get_mvendorid();
	vcpu->arch.marchid = sbi_get_marchid();
	vcpu->arch.mimpid = sbi_get_mimpid();

	/* Setup VCPU hfence queue */
	spin_lock_init(&vcpu->arch.hfence_lock);

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;
	cntx->hstatus = 0;
	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;

	/* By default, make CY, TM, and IR counters accessible in VU mode */
	reset_csr->scounteren = 0x7;

	/* Setup VCPU timer */
	kvm_riscv_vcpu_timer_init(vcpu);

	/* Setup performance monitoring */
	kvm_riscv_vcpu_pmu_init(vcpu);

	/* Setup VCPU AIA */
	rc = kvm_riscv_vcpu_aia_init(vcpu);
	if (rc)
		return rc;

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu);

	return 0;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/*
	 * The vcpu with id 0 is the designated boot cpu.
	 * Keep all vcpus with a non-zero id in the powered-off state so that
	 * they can be brought up using the SBI HSM extension.
	 */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Cleanup VCPU AIA context */
	kvm_riscv_vcpu_aia_deinit(vcpu);

	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	kvm_riscv_vcpu_pmu_deinit(vcpu);

	/* Free unused pages pre-allocated for G-stage page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_timer_pending(vcpu);
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
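/*
 * KVM_GET_ONE_REG handler for the CONFIG register group (isa,
 * zicbom/zicboz block sizes, mvendorid, marchid, mimpid).
 */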
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -EINVAL;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -EINVAL;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
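/*
 * KVM_SET_ONE_REG handler for the CONFIG register group. The isa register
 * and the machine ID registers may only be written before the VCPU has run;
 * the cache-block-size registers are read-only.
 */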
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		return -EOPNOTSUPP;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		return -EOPNOTSUPP;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -EINVAL;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -EINVAL;

	return 0;
}
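/*
 * Read a general (non-AIA) guest CSR. The sip value is synthesized from the
 * shadow HVIP after flushing any software-pending interrupts into it.
 */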
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}
static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
						 unsigned long reg_num,
						 unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (rc)
		return rc;

	return 0;
}
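/*
 * KVM_GET_ONE_REG handler for the ISA_EXT register group: returns 1 if the
 * given extension is enabled for this VCPU, 0 otherwise.
 */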
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val = 0;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		reg_val = 1; /* Mark the given extension as available */

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -EOPNOTSUPP;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled.
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}
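/* Dispatch a KVM_SET_ONE_REG call to the handler for its register group. */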
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	default:
		break;
	}

	return -EINVAL;
}
static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	default:
		break;
	}

	return -EINVAL;
}
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
	default:
		break;
	}

	return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}
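/*
 * Fold interrupts made pending by software (irqs_pending/irqs_pending_mask)
 * into the shadow HVIP value that is written to hardware before entering
 * the guest.
 */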
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}

	/* Flush AIA high interrupts */
	kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
}
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = csr_read(CSR_VSIE);

	/* Sync-up HVIP.VSSIP bit changes done by the guest */
	hvip = csr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, v->irqs_pending);
		}
	}

	/* Sync-up AIA high interrupts */
	kvm_riscv_vcpu_aia_sync_interrupts(vcpu);

	/* Sync-up timer CSRs */
	kvm_riscv_vcpu_timer_sync(vcpu);
}
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	/*
	 * We only allow VS-mode software, timer, and external
	 * interrupts when irq is one of the local interrupts
	 * defined by the RISC-V privilege specification.
	 */
	if (irq < IRQ_LOCAL_MAX &&
	    irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	set_bit(irq, vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	/*
	 * We only allow VS-mode software, timer, and external
	 * interrupts when irq is one of the local interrupts
	 * defined by the RISC-V privilege specification.
	 */
	if (irq < IRQ_LOCAL_MAX &&
	    irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	clear_bit(irq, vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, vcpu->arch.irqs_pending_mask);

	return 0;
}
bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	unsigned long ie;

	ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
		<< VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
	ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
		(unsigned long)mask;
	if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
		return true;

	/* Check AIA high interrupts */
	return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		kvm_riscv_vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	/* TODO: To be implemented later. */
	return -EINVAL;
}
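/*
 * Program the hypervisor environment-configuration CSR (henvcfg) according
 * to the ISA extensions enabled for this guest (Svpbmt, Sstc, Zicbom,
 * Zicboz); called when the VCPU is loaded on a host CPU.
 */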
static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
{
	u64 henvcfg = 0;

	if (riscv_isa_extension_available(isa, SVPBMT))
		henvcfg |= ENVCFG_PBMTE;

	if (riscv_isa_extension_available(isa, SSTC))
		henvcfg |= ENVCFG_STCE;

	if (riscv_isa_extension_available(isa, ZICBOM))
		henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

	if (riscv_isa_extension_available(isa, ZICBOZ))
		henvcfg |= ENVCFG_CBZE;

	csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
	csr_write(CSR_HENVCFGH, henvcfg >> 32);
#endif
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);

	kvm_riscv_vcpu_update_config(vcpu->arch.isa);

	kvm_riscv_gstage_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);

	kvm_riscv_vcpu_aia_load(vcpu, cpu);

	vcpu->cpu = cpu;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	kvm_riscv_vcpu_aia_put(vcpu);

	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

	kvm_riscv_vcpu_timer_save(vcpu);

	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
}
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			rcuwait_wait_event(wait,
				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
				TASK_INTERRUPTIBLE);
			kvm_vcpu_srcu_read_lock(vcpu);

			if (vcpu->arch.power_off || vcpu->arch.pause) {
				/*
				 * Awakened to handle a signal; request to
				 * sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_gstage_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
			kvm_riscv_fence_i_process(vcpu);

		/*
		 * The generic KVM_REQ_TLB_FLUSH is the same as
		 * KVM_REQ_HFENCE_GVMA_VMID_ALL
		 */
		if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
			kvm_riscv_hfence_gvma_vmid_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
			kvm_riscv_hfence_vvma_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
			kvm_riscv_hfence_process(vcpu);
	}
}
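/* Write the shadow HVIP value (and the AIA equivalents) to hardware. */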
static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_HVIP, csr->hvip);
	kvm_riscv_vcpu_aia_update_hvip(vcpu);
}
/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	guest_state_enter_irqoff();
	__kvm_riscv_switch_to(&vcpu->arch);
	vcpu->arch.last_exit_cpu = vcpu->cpu;
	guest_state_exit_irqoff();
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;

	kvm_vcpu_srcu_read_lock(vcpu);

	switch (run->exit_reason) {
	case KVM_EXIT_MMIO:
		/* Process MMIO value returned from user-space */
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_SBI:
		/* Process SBI value returned from user-space */
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_CSR:
		/* Process CSR value returned from user-space */
		ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return ret;
	}

	if (run->immediate_exit) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);
->exit_reason
= KVM_EXIT_UNKNOWN
;
1149 /* Check conditions before entering the guest */
1150 ret
= xfer_to_guest_mode_handle_work(vcpu
);
1155 kvm_riscv_gstage_vmid_update(vcpu
);
1157 kvm_riscv_check_vcpu_requests(vcpu
);
1161 /* Update AIA HW state before entering guest */
1162 ret
= kvm_riscv_vcpu_aia_update(vcpu
);
1168 local_irq_disable();
1171 * Ensure we set mode to IN_GUEST_MODE after we disable
1172 * interrupts and before the final VCPU requests check.
1173 * See the comment in kvm_vcpu_exiting_guest_mode() and
1174 * Documentation/virt/kvm/vcpu-requests.rst
1176 vcpu
->mode
= IN_GUEST_MODE
;
1178 kvm_vcpu_srcu_read_unlock(vcpu
);
1179 smp_mb__after_srcu_read_unlock();
1182 * We might have got VCPU interrupts updated asynchronously
1183 * so update it in HW.
1185 kvm_riscv_vcpu_flush_interrupts(vcpu
);
1187 /* Update HVIP CSR for current CPU */
1188 kvm_riscv_update_hvip(vcpu
);
1191 kvm_riscv_gstage_vmid_ver_changed(&vcpu
->kvm
->arch
.vmid
) ||
1192 kvm_request_pending(vcpu
) ||
1193 xfer_to_guest_mode_work_pending()) {
1194 vcpu
->mode
= OUTSIDE_GUEST_MODE
;
1197 kvm_vcpu_srcu_read_lock(vcpu
);
1202 * Cleanup stale TLB enteries
1204 * Note: This should be done after G-stage VMID has been
1205 * updated using kvm_riscv_gstage_vmid_ver_changed()
1207 kvm_riscv_local_tlb_sanitize(vcpu
);
1209 guest_timing_enter_irqoff();
		kvm_riscv_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/*
		 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
		 * get an interrupt between __kvm_riscv_switch_to() and
		 * local_irq_enable() which can potentially change CSRs.
		 */
		trap.sepc = vcpu->arch.guest_context.sepc;
		trap.scause = csr_read(CSR_SCAUSE);
		trap.stval = csr_read(CSR_STVAL);
		trap.htval = csr_read(CSR_HTVAL);
		trap.htinst = csr_read(CSR_HTINST);

		/* Sync up the interrupt state with HW */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * There's no barrier which ensures that pending interrupts are
		 * recognised, so we just hope that the CPU takes any pending
		 * interrupts between the enable and disable.
		 */
		local_irq_enable();
		local_irq_disable();

		guest_timing_exit_irqoff();

		local_irq_enable();

		preempt_enable();

		kvm_vcpu_srcu_read_lock(vcpu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	kvm_vcpu_srcu_read_unlock(vcpu);

	return ret;
}