// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>

#include <asm/csr.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>
#include <asm/vector.h>
#include <asm/kvm_vcpu_vector.h>

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, csr_exit_user),
	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
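
/*
 * Note: user space reads these stats through the binary stats file
 * descriptor (KVM_GET_STATS_FD); the offsets above describe the header,
 * the name string, the descriptor array, and the data values, in that order.
 */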

#define KVM_RISCV_BASE_ISA_MASK	GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)	[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
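
/*
 * For example, KVM_ISA_EXT_ARR(SSTC) expands to
 * [KVM_RISCV_ISA_EXT_SSTC] = RISCV_ISA_EXT_SSTC.
 */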

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
	[KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,

	KVM_ISA_EXT_ARR(SSAIA),
	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZBB),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZICBOM),
	KVM_ISA_EXT_ARR(ZICBOZ),
};

static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	case KVM_RISCV_ISA_EXT_V:
		return riscv_v_vstate_ctrl_user_allowed();
	default:
		break;
	}

	return true;
}
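
/*
 * The H extension is never allowed to be enabled for a guest here, and V is
 * only allowed when user space vector state control permits it.
 */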

static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSAIA:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
	case KVM_RISCV_ISA_EXT_ZBB:
		return false;
	default:
		break;
	}

	return true;
}

static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
	bool loaded;

	/*
	 * Preemption must be disabled here because it races with
	 * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
	 * which also call vcpu_load()/vcpu_put().
	 */
	get_cpu();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	vcpu->arch.last_exit_cpu = -1;

	memcpy(csr, reset_csr, sizeof(*csr));

	memcpy(cntx, reset_cntx, sizeof(*cntx));

	kvm_riscv_vcpu_fp_reset(vcpu);

	kvm_riscv_vcpu_vector_reset(vcpu);

	kvm_riscv_vcpu_timer_reset(vcpu);

	kvm_riscv_vcpu_aia_reset(vcpu);

	bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

	kvm_riscv_vcpu_pmu_reset(vcpu);

	vcpu->arch.hfence_head = 0;
	vcpu->arch.hfence_tail = 0;
	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

	/* Reset the guest CSRs for hotplug usecase */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	put_cpu();
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int rc;
	struct kvm_cpu_context *cntx;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	unsigned long host_isa, i;

	/* Mark this VCPU never ran */
	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

	/* Setup ISA features available to VCPU */
	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}

	/* Setup vendor, arch, and implementation details */
	vcpu->arch.mvendorid = sbi_get_mvendorid();
	vcpu->arch.marchid = sbi_get_marchid();
	vcpu->arch.mimpid = sbi_get_mimpid();

	/* Setup VCPU hfence queue */
	spin_lock_init(&vcpu->arch.hfence_lock);

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;
	cntx->hstatus = 0;
	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;

	if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
		return -ENOMEM;

	/* By default, make CY, TM, and IR counters accessible in VU mode */
	reset_csr->scounteren = 0x7;

	/* Setup VCPU timer */
	kvm_riscv_vcpu_timer_init(vcpu);

	/* Setup performance monitoring */
	kvm_riscv_vcpu_pmu_init(vcpu);

	/* Setup VCPU AIA */
	rc = kvm_riscv_vcpu_aia_init(vcpu);
	if (rc)
		return rc;

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu);

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/*
	 * vcpu with id 0 is the designated boot cpu.
	 * Keep all vcpus with non-zero id in power-off state so that
	 * they can be brought up using SBI HSM extension.
	 */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Cleanup VCPU AIA context */
	kvm_riscv_vcpu_aia_deinit(vcpu);

	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	kvm_riscv_vcpu_pmu_deinit(vcpu);

	/* Free unused pages pre-allocated for G-stage page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);

	/* Free vector context space for host and guest kernel */
	kvm_riscv_vcpu_free_vector_context(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_timer_pending(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -EINVAL;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
			return -EINVAL;
		reg_val = riscv_cboz_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
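
/*
 * The reg->id value from user space encodes the architecture, register size,
 * and register class in its upper bits; the masking above strips those off so
 * that only the config register index remains.
 */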

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		return -EOPNOTSUPP;
	case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
		return -EOPNOTSUPP;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
			  KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -EINVAL;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -EINVAL;

	return 0;
}

static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
					  unsigned long reg_num,
					  unsigned long *out_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
		*out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
	} else
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
						 unsigned long reg_num,
						 unsigned long reg_val)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

	return 0;
}
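
/*
 * The guest's sip view is not stored directly: it is derived from the hvip
 * shadow, so reads and writes of the sip pseudo-CSR above are translated
 * using VSIP_TO_HVIP_SHIFT and VSIP_VALID_MASK.
 */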

static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
	switch (reg_subtype) {
	case KVM_REG_RISCV_CSR_GENERAL:
		rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
		break;
	case KVM_REG_RISCV_CSR_AIA:
		rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (rc)
		return rc;

	return 0;
}

static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val = 0;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		reg_val = 1; /* Mark the given extension as available */

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -EOPNOTSUPP;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled.
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}

static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_set_reg_vector(vcpu, reg,
						     KVM_REG_RISCV_VECTOR);
	default:
		break;
	}

	return -EINVAL;
}

static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	case KVM_REG_RISCV_SBI_EXT:
		return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
	case KVM_REG_RISCV_VECTOR:
		return kvm_riscv_vcpu_get_reg_vector(vcpu, reg,
						     KVM_REG_RISCV_VECTOR);
	default:
		break;
	}

	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}
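
/*
 * KVM_INTERRUPT is handled in the async ioctl path above (no vcpu mutex), so
 * user space can assert or deassert the guest external interrupt line at any
 * time.
 */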

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
	default:
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}

	/* Flush AIA high interrupts */
	kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
}
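
/*
 * irqs_pending_mask[0] marks which bits of irqs_pending[0] changed since the
 * last flush; only those bits are folded into the hvip shadow above.
 */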

void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = csr_read(CSR_VSIE);

	/* Sync-up HVIP.VSSIP bit changes done by the Guest */
	hvip = csr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, v->irqs_pending);
		}
	}

	/* Sync-up AIA high interrupts */
	kvm_riscv_vcpu_aia_sync_interrupts(vcpu);

	/* Sync-up timer CSRs */
	kvm_riscv_vcpu_timer_sync(vcpu);
}

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	/*
	 * We only allow VS-mode software, timer, and external
	 * interrupts when irq is one of the local interrupts
	 * defined by the RISC-V privilege specification.
	 */
	if (irq < IRQ_LOCAL_MAX &&
	    irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	set_bit(irq, vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}
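
/*
 * The pending bit is published before the mask bit (smp_mb__before_atomic())
 * so that kvm_riscv_vcpu_flush_interrupts() never observes a mask bit without
 * the matching pending-bit update.
 */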

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	/*
	 * We only allow VS-mode software, timer, and external
	 * interrupts when irq is one of the local interrupts
	 * defined by the RISC-V privilege specification.
	 */
	if (irq < IRQ_LOCAL_MAX &&
	    irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	clear_bit(irq, vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, vcpu->arch.irqs_pending_mask);

	return 0;
}

bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	unsigned long ie;

	ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
		<< VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
	ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
		(unsigned long)mask;
	if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
		return true;

	/* Check AIA high interrupts */
	return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		kvm_riscv_vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
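
/*
 * These MP state transitions are typically driven by user space when it
 * emulates SBI HSM start/stop requests or resets the VCPU.
 */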

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	/* TODO: To be implemented later. */
	return -EINVAL;
}

static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
{
	u64 henvcfg = 0;

	if (riscv_isa_extension_available(isa, SVPBMT))
		henvcfg |= ENVCFG_PBMTE;

	if (riscv_isa_extension_available(isa, SSTC))
		henvcfg |= ENVCFG_STCE;

	if (riscv_isa_extension_available(isa, ZICBOM))
		henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

	if (riscv_isa_extension_available(isa, ZICBOZ))
		henvcfg |= ENVCFG_CBZE;

	csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
	csr_write(CSR_HENVCFGH, henvcfg >> 32);
#endif
}
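
/*
 * henvcfg gates which of these features (Svpbmt, Sstc, Zicbom, Zicboz) the
 * guest can actually use, so it is recomputed from the VCPU's ISA bitmap on
 * every vcpu load.
 */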

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);

	kvm_riscv_vcpu_update_config(vcpu->arch.isa);

	kvm_riscv_gstage_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);
	kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
					    vcpu->arch.isa);

	kvm_riscv_vcpu_aia_load(vcpu, cpu);

	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	kvm_riscv_vcpu_aia_put(vcpu);

	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

	kvm_riscv_vcpu_timer_save(vcpu);
	kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,
					 vcpu->arch.isa);
	kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);

	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
}

static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			rcuwait_wait_event(wait,
				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
				TASK_INTERRUPTIBLE);
			kvm_vcpu_srcu_read_lock(vcpu);

			if (vcpu->arch.power_off || vcpu->arch.pause) {
				/*
				 * Awakened to handle a signal; request to
				 * sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_gstage_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
			kvm_riscv_fence_i_process(vcpu);

		/*
		 * The generic KVM_REQ_TLB_FLUSH is the same as
		 * KVM_REQ_HFENCE_GVMA_VMID_ALL
		 */
		if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
			kvm_riscv_hfence_gvma_vmid_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
			kvm_riscv_hfence_vvma_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
			kvm_riscv_hfence_process(vcpu);
	}
}

static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_HVIP, csr->hvip);
	kvm_riscv_vcpu_aia_update_hvip(vcpu);
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	guest_state_enter_irqoff();
	__kvm_riscv_switch_to(&vcpu->arch);
	vcpu->arch.last_exit_cpu = vcpu->cpu;
	guest_state_exit_irqoff();
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;

	kvm_vcpu_srcu_read_lock(vcpu);

	switch (run->exit_reason) {
	case KVM_EXIT_MMIO:
		/* Process MMIO value returned from user-space */
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_SBI:
		/* Process SBI value returned from user-space */
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_CSR:
		/* Process CSR value returned from user-space */
		ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return ret;
	}

	if (run->immediate_exit) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (ret)
			continue;
		ret = 1;

		kvm_riscv_gstage_vmid_update(vcpu);

		kvm_riscv_check_vcpu_requests(vcpu);

		preempt_disable();

		/* Update AIA HW state before entering guest */
		ret = kvm_riscv_vcpu_aia_update(vcpu);
		if (ret <= 0) {
			preempt_enable();
			continue;
		}
		ret = 1;

		local_irq_disable();

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;

		kvm_vcpu_srcu_read_unlock(vcpu);
		smp_mb__after_srcu_read_unlock();

		/*
		 * We might have got VCPU interrupts updated asynchronously
		 * so update them in HW.
		 */
		kvm_riscv_vcpu_flush_interrupts(vcpu);

		/* Update HVIP CSR for current CPU */
		kvm_riscv_update_hvip(vcpu);

		if (ret <= 0 ||
		    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu) ||
		    xfer_to_guest_mode_work_pending()) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			preempt_enable();
			kvm_vcpu_srcu_read_lock(vcpu);
			continue;
		}

		/*
		 * Cleanup stale TLB entries
		 *
		 * Note: This should be done after G-stage VMID has been
		 * updated using kvm_riscv_gstage_vmid_ver_changed()
		 */
		kvm_riscv_local_tlb_sanitize(vcpu);

		guest_timing_enter_irqoff();

		kvm_riscv_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/*
		 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
		 * get an interrupt between __kvm_riscv_switch_to() and
		 * local_irq_enable() which can potentially change CSRs.
		 */
		trap.sepc = vcpu->arch.guest_context.sepc;
		trap.scause = csr_read(CSR_SCAUSE);
		trap.stval = csr_read(CSR_STVAL);
		trap.htval = csr_read(CSR_HTVAL);
		trap.htinst = csr_read(CSR_HTINST);

		/* Sync up interrupt state with HW */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * There's no barrier which ensures that pending interrupts are
		 * recognised, so we just hope that the CPU takes any pending
		 * interrupts between the enable and disable.
		 */
		local_irq_enable();
		local_irq_disable();

		guest_timing_exit_irqoff();

		local_irq_enable();

		preempt_enable();

		kvm_vcpu_srcu_read_lock(vcpu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	kvm_vcpu_srcu_read_unlock(vcpu);