RISC-V: KVM: Implement guest external interrupt line management
arch/riscv/kvm/vcpu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        KVM_GENERIC_VCPU_STATS(),
        STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
        STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
        STATS_DESC_COUNTER(VCPU, mmio_exit_user),
        STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
        STATS_DESC_COUNTER(VCPU, csr_exit_user),
        STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
        STATS_DESC_COUNTER(VCPU, signal_exits),
        STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vcpu_stats_desc),
};

#define KVM_RISCV_BASE_ISA_MASK         GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)            [KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
        [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
        [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
        [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
        [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
        [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
        [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
        [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,

        KVM_ISA_EXT_ARR(SSAIA),
        KVM_ISA_EXT_ARR(SSTC),
        KVM_ISA_EXT_ARR(SVINVAL),
        KVM_ISA_EXT_ARR(SVPBMT),
        KVM_ISA_EXT_ARR(ZBB),
        KVM_ISA_EXT_ARR(ZIHINTPAUSE),
        KVM_ISA_EXT_ARR(ZICBOM),
        KVM_ISA_EXT_ARR(ZICBOZ),
};

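/*
 * Map a host base (single-letter) ISA extension number to the
 * corresponding KVM ISA extension ID, or KVM_RISCV_ISA_EXT_MAX
 * when no mapping exists.
 */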
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
        unsigned long i;

        for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
                if (kvm_isa_ext_arr[i] == base_ext)
                        return i;
        }

        return KVM_RISCV_ISA_EXT_MAX;
}

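/* Check whether user space may enable the given ISA extension for a VCPU */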
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
        switch (ext) {
        case KVM_RISCV_ISA_EXT_H:
                return false;
        default:
                break;
        }

        return true;
}

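/* Check whether user space may disable the given ISA extension for a VCPU */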
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
        switch (ext) {
        case KVM_RISCV_ISA_EXT_A:
        case KVM_RISCV_ISA_EXT_C:
        case KVM_RISCV_ISA_EXT_I:
        case KVM_RISCV_ISA_EXT_M:
        case KVM_RISCV_ISA_EXT_SSAIA:
        case KVM_RISCV_ISA_EXT_SSTC:
        case KVM_RISCV_ISA_EXT_SVINVAL:
        case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
        case KVM_RISCV_ISA_EXT_ZBB:
                return false;
        default:
                break;
        }

        return true;
}

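/*
 * Bring the VCPU back to its initial state: restore the reset CSR and
 * GPR context, reset FP, timer, AIA and PMU state, clear any pending
 * interrupts, and empty the HFENCE request queue.
 */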
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
        bool loaded;

        /*
         * Preemption must be disabled here because this races with
         * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
         * which also call vcpu_load()/vcpu_put().
         */
        get_cpu();
        loaded = (vcpu->cpu != -1);
        if (loaded)
                kvm_arch_vcpu_put(vcpu);

        vcpu->arch.last_exit_cpu = -1;

        memcpy(csr, reset_csr, sizeof(*csr));

        memcpy(cntx, reset_cntx, sizeof(*cntx));

        kvm_riscv_vcpu_fp_reset(vcpu);

        kvm_riscv_vcpu_timer_reset(vcpu);

        kvm_riscv_vcpu_aia_reset(vcpu);

        bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
        bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

        kvm_riscv_vcpu_pmu_reset(vcpu);

        vcpu->arch.hfence_head = 0;
        vcpu->arch.hfence_tail = 0;
        memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

        /* Reset the guest CSRs for the hotplug use case */
        if (loaded)
                kvm_arch_vcpu_load(vcpu, smp_processor_id());
        put_cpu();
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
        return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
        int rc;
        struct kvm_cpu_context *cntx;
        struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
        unsigned long host_isa, i;

        /* Mark this VCPU never ran */
        vcpu->arch.ran_atleast_once = false;
        vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
        bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

        /* Setup ISA features available to VCPU */
        for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
                host_isa = kvm_isa_ext_arr[i];
                if (__riscv_isa_extension_available(NULL, host_isa) &&
                    kvm_riscv_vcpu_isa_enable_allowed(i))
                        set_bit(host_isa, vcpu->arch.isa);
        }

        /* Setup vendor, arch, and implementation details */
        vcpu->arch.mvendorid = sbi_get_mvendorid();
        vcpu->arch.marchid = sbi_get_marchid();
        vcpu->arch.mimpid = sbi_get_mimpid();

        /* Setup VCPU hfence queue */
        spin_lock_init(&vcpu->arch.hfence_lock);

        /* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
        cntx = &vcpu->arch.guest_reset_context;
        cntx->sstatus = SR_SPP | SR_SPIE;
        cntx->hstatus = 0;
        cntx->hstatus |= HSTATUS_VTW;
        cntx->hstatus |= HSTATUS_SPVP;
        cntx->hstatus |= HSTATUS_SPV;

        /* By default, make CY, TM, and IR counters accessible in VU mode */
        reset_csr->scounteren = 0x7;

        /* Setup VCPU timer */
        kvm_riscv_vcpu_timer_init(vcpu);

        /* Setup performance monitoring */
        kvm_riscv_vcpu_pmu_init(vcpu);

        /* Setup VCPU AIA */
        rc = kvm_riscv_vcpu_aia_init(vcpu);
        if (rc)
                return rc;

        /* Reset VCPU */
        kvm_riscv_reset_vcpu(vcpu);

        return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        /*
         * The VCPU with id 0 is the designated boot CPU.
         * Keep all other VCPUs in the powered-off state so that they
         * can be brought up using the SBI HSM extension.
         */
        if (vcpu->vcpu_idx != 0)
                kvm_riscv_vcpu_power_off(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        /* Cleanup VCPU AIA context */
        kvm_riscv_vcpu_aia_deinit(vcpu);

        /* Cleanup VCPU timer */
        kvm_riscv_vcpu_timer_deinit(vcpu);

        kvm_riscv_vcpu_pmu_deinit(vcpu);

        /* Free unused pages pre-allocated for G-stage page table mappings */
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvm_riscv_vcpu_timer_pending(vcpu);
}

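/*
 * While a VCPU is blocked, arm the AIA wake-on-HGEI mechanism so that a
 * guest external interrupt can wake it up; disarm it again on unblock.
 */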
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
        kvm_riscv_aia_wakeon_hgei(vcpu, true);
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
        kvm_riscv_aia_wakeon_hgei(vcpu, false);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
                !vcpu->arch.power_off && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
        return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
                                         const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CONFIG);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        switch (reg_num) {
        case KVM_REG_RISCV_CONFIG_REG(isa):
                reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
                break;
        case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
                if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
                        return -EINVAL;
                reg_val = riscv_cbom_block_size;
                break;
        case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
                if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
                        return -EINVAL;
                reg_val = riscv_cboz_block_size;
                break;
        case KVM_REG_RISCV_CONFIG_REG(mvendorid):
                reg_val = vcpu->arch.mvendorid;
                break;
        case KVM_REG_RISCV_CONFIG_REG(marchid):
                reg_val = vcpu->arch.marchid;
                break;
        case KVM_REG_RISCV_CONFIG_REG(mimpid):
                reg_val = vcpu->arch.mimpid;
                break;
        default:
                return -EINVAL;
        }

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
                                         const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CONFIG);
        unsigned long i, isa_ext, reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_num) {
        case KVM_REG_RISCV_CONFIG_REG(isa):
                /*
                 * This ONE REG interface is only defined for
                 * single letter extensions.
                 */
                if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
                        return -EINVAL;

                if (!vcpu->arch.ran_atleast_once) {
                        /* Ignore the enable/disable request for certain extensions */
                        for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
                                isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
                                if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
                                        reg_val &= ~BIT(i);
                                        continue;
                                }
                                if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
                                        if (reg_val & BIT(i))
                                                reg_val &= ~BIT(i);
                                if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
                                        if (!(reg_val & BIT(i)))
                                                reg_val |= BIT(i);
                        }
                        reg_val &= riscv_isa_extension_base(NULL);
                        /* Do not modify anything beyond single letter extensions */
                        reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
                                  (reg_val & KVM_RISCV_BASE_ISA_MASK);
                        vcpu->arch.isa[0] = reg_val;
                        kvm_riscv_vcpu_fp_reset(vcpu);
                } else {
                        return -EOPNOTSUPP;
                }
                break;
        case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
                return -EOPNOTSUPP;
        case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
                return -EOPNOTSUPP;
        case KVM_REG_RISCV_CONFIG_REG(mvendorid):
                if (!vcpu->arch.ran_atleast_once)
                        vcpu->arch.mvendorid = reg_val;
                else
                        return -EBUSY;
                break;
        case KVM_REG_RISCV_CONFIG_REG(marchid):
                if (!vcpu->arch.ran_atleast_once)
                        vcpu->arch.marchid = reg_val;
                else
                        return -EBUSY;
                break;
        case KVM_REG_RISCV_CONFIG_REG(mimpid):
                if (!vcpu->arch.ran_atleast_once)
                        vcpu->arch.mimpid = reg_val;
                else
                        return -EBUSY;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
                                       const struct kvm_one_reg *reg)
{
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CORE);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
                return -EINVAL;

        if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
                reg_val = cntx->sepc;
        else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
                 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
                reg_val = ((unsigned long *)cntx)[reg_num];
        else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
                reg_val = (cntx->sstatus & SR_SPP) ?
                                KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
        else
                return -EINVAL;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
                                       const struct kvm_one_reg *reg)
{
        struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CORE);
        unsigned long reg_val;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;
        if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
                cntx->sepc = reg_val;
        else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
                 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
                ((unsigned long *)cntx)[reg_num] = reg_val;
        else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
                if (reg_val == KVM_RISCV_MODE_S)
                        cntx->sstatus |= SR_SPP;
                else
                        cntx->sstatus &= ~SR_SPP;
        } else
                return -EINVAL;

        return 0;
}

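/*
 * Read a general CSR from the guest CSR file. Reads of SIP are derived
 * from the shadow HVIP after flushing any software-pending interrupts.
 */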
static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
                                          unsigned long reg_num,
                                          unsigned long *out_val)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
                return -EINVAL;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
                kvm_riscv_vcpu_flush_interrupts(vcpu);
                *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
                *out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
        } else
                *out_val = ((unsigned long *)csr)[reg_num];

        return 0;
}

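/*
 * Write a general CSR in the guest CSR file. Writes to SIP are shifted
 * into HVIP format, stored in the shadow HVIP, and clear the
 * pending-interrupt update mask.
 */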
static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
                                                 unsigned long reg_num,
                                                 unsigned long reg_val)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
                return -EINVAL;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
                reg_val &= VSIP_VALID_MASK;
                reg_val <<= VSIP_TO_HVIP_SHIFT;
        }

        ((unsigned long *)csr)[reg_num] = reg_val;

        if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
                WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);

        return 0;
}

static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
                                      const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CSR);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
        switch (reg_subtype) {
        case KVM_REG_RISCV_CSR_GENERAL:
                rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
                break;
        case KVM_REG_RISCV_CSR_AIA:
                rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
                break;
        default:
                rc = -EINVAL;
                break;
        }
        if (rc)
                return rc;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
                                      const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_CSR);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
        switch (reg_subtype) {
        case KVM_REG_RISCV_CSR_GENERAL:
                rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
                break;
        case KVM_REG_RISCV_CSR_AIA:
                rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
                break;
        default:
                rc = -EINVAL;
                break;
        }
        if (rc)
                return rc;

        return 0;
}

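/*
 * Report via ONE_REG whether a single ISA extension is currently
 * enabled for this VCPU (1 = enabled, 0 = disabled).
 */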
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
                                          const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_ISA_EXT);
        unsigned long reg_val = 0;
        unsigned long host_isa_ext;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
            reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
                return -EINVAL;

        host_isa_ext = kvm_isa_ext_arr[reg_num];
        if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
                reg_val = 1; /* Mark the given extension as available */

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
                                          const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_ISA_EXT);
        unsigned long reg_val;
        unsigned long host_isa_ext;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
            reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
                return -EINVAL;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        host_isa_ext = kvm_isa_ext_arr[reg_num];
        if (!__riscv_isa_extension_available(NULL, host_isa_ext))
                return -EOPNOTSUPP;

        if (!vcpu->arch.ran_atleast_once) {
                /*
                 * All multi-letter extensions and a few single-letter
                 * extensions can be disabled.
                 */
                if (reg_val == 1 &&
                    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
                        set_bit(host_isa_ext, vcpu->arch.isa);
                else if (!reg_val &&
                         kvm_riscv_vcpu_isa_disable_allowed(reg_num))
                        clear_bit(host_isa_ext, vcpu->arch.isa);
                else
                        return -EINVAL;
                kvm_riscv_vcpu_fp_reset(vcpu);
        } else {
                return -EOPNOTSUPP;
        }

        return 0;
}

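/* Dispatch a KVM_SET_ONE_REG request to the handler for its register class */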
static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
                                  const struct kvm_one_reg *reg)
{
        switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
        case KVM_REG_RISCV_CONFIG:
                return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
        case KVM_REG_RISCV_CORE:
                return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
        case KVM_REG_RISCV_CSR:
                return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
        case KVM_REG_RISCV_TIMER:
                return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
        case KVM_REG_RISCV_FP_F:
                return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_F);
        case KVM_REG_RISCV_FP_D:
                return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);
        case KVM_REG_RISCV_ISA_EXT:
                return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
        case KVM_REG_RISCV_SBI_EXT:
                return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
        default:
                break;
        }

        return -EINVAL;
}

static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
                                  const struct kvm_one_reg *reg)
{
        switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
        case KVM_REG_RISCV_CONFIG:
                return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
        case KVM_REG_RISCV_CORE:
                return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
        case KVM_REG_RISCV_CSR:
                return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
        case KVM_REG_RISCV_TIMER:
                return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
        case KVM_REG_RISCV_FP_F:
                return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_F);
        case KVM_REG_RISCV_FP_D:
                return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
                                                 KVM_REG_RISCV_FP_D);
        case KVM_REG_RISCV_ISA_EXT:
                return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
        case KVM_REG_RISCV_SBI_EXT:
                return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
        default:
                break;
        }

        return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        if (ioctl == KVM_INTERRUPT) {
                struct kvm_interrupt irq;

                if (copy_from_user(&irq, argp, sizeof(irq)))
                        return -EFAULT;

                if (irq.irq == KVM_INTERRUPT_SET)
                        return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
                else
                        return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
        }

        return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r = -EINVAL;

        switch (ioctl) {
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;

                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;

                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
                else
                        r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
                break;
        }
        default:
                break;
        }

        return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        return -EINVAL;
}

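/*
 * Fold the bits queued in irqs_pending (under irqs_pending_mask) into the
 * shadow HVIP so that they get injected on the next guest entry.
 */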
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
        unsigned long mask, val;

        if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
                mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
                val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;

                csr->hvip &= ~mask;
                csr->hvip |= val;
        }

        /* Flush AIA high interrupts */
        kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
}

void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
        unsigned long hvip;
        struct kvm_vcpu_arch *v = &vcpu->arch;
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        /* Read current HVIP and VSIE CSRs */
        csr->vsie = csr_read(CSR_VSIE);

        /* Sync-up the HVIP.VSSIP bit changes done by the Guest */
        hvip = csr_read(CSR_HVIP);
        if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
                if (hvip & (1UL << IRQ_VS_SOFT)) {
                        if (!test_and_set_bit(IRQ_VS_SOFT,
                                              v->irqs_pending_mask))
                                set_bit(IRQ_VS_SOFT, v->irqs_pending);
                } else {
                        if (!test_and_set_bit(IRQ_VS_SOFT,
                                              v->irqs_pending_mask))
                                clear_bit(IRQ_VS_SOFT, v->irqs_pending);
                }
        }

        /* Sync-up AIA high interrupts */
        kvm_riscv_vcpu_aia_sync_interrupts(vcpu);

        /* Sync-up timer CSRs */
        kvm_riscv_vcpu_timer_sync(vcpu);
}

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
        /*
         * We only allow VS-mode software, timer, and external
         * interrupts when irq is one of the local interrupts
         * defined by the RISC-V privileged specification.
         */
        if (irq < IRQ_LOCAL_MAX &&
            irq != IRQ_VS_SOFT &&
            irq != IRQ_VS_TIMER &&
            irq != IRQ_VS_EXT)
                return -EINVAL;

        set_bit(irq, vcpu->arch.irqs_pending);
        smp_mb__before_atomic();
        set_bit(irq, vcpu->arch.irqs_pending_mask);

        kvm_vcpu_kick(vcpu);

        return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
        /*
         * We only allow VS-mode software, timer, and external
         * interrupts when irq is one of the local interrupts
         * defined by the RISC-V privileged specification.
         */
        if (irq < IRQ_LOCAL_MAX &&
            irq != IRQ_VS_SOFT &&
            irq != IRQ_VS_TIMER &&
            irq != IRQ_VS_EXT)
                return -EINVAL;

        clear_bit(irq, vcpu->arch.irqs_pending);
        smp_mb__before_atomic();
        set_bit(irq, vcpu->arch.irqs_pending_mask);

        return 0;
}

bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
        unsigned long ie;

        ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
              << VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
        ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
              (unsigned long)mask;
        if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
                return true;

        /* Check AIA high interrupts */
        return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
        vcpu->arch.power_off = true;
        kvm_make_request(KVM_REQ_SLEEP, vcpu);
        kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
        vcpu->arch.power_off = false;
        kvm_vcpu_wake_up(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        if (vcpu->arch.power_off)
                mp_state->mp_state = KVM_MP_STATE_STOPPED;
        else
                mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

        return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        int ret = 0;

        switch (mp_state->mp_state) {
        case KVM_MP_STATE_RUNNABLE:
                vcpu->arch.power_off = false;
                break;
        case KVM_MP_STATE_STOPPED:
                kvm_riscv_vcpu_power_off(vcpu);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        /* TODO: To be implemented later. */
        return -EINVAL;
}

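/*
 * Program the hypervisor environment configuration (henvcfg) according to
 * the ISA extensions enabled for this VCPU (Svpbmt, Sstc, Zicbom, Zicboz).
 */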
static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
{
        u64 henvcfg = 0;

        if (riscv_isa_extension_available(isa, SVPBMT))
                henvcfg |= ENVCFG_PBMTE;

        if (riscv_isa_extension_available(isa, SSTC))
                henvcfg |= ENVCFG_STCE;

        if (riscv_isa_extension_available(isa, ZICBOM))
                henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

        if (riscv_isa_extension_available(isa, ZICBOZ))
                henvcfg |= ENVCFG_CBZE;

        csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
        csr_write(CSR_HENVCFGH, henvcfg >> 32);
#endif
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        csr_write(CSR_VSSTATUS, csr->vsstatus);
        csr_write(CSR_VSIE, csr->vsie);
        csr_write(CSR_VSTVEC, csr->vstvec);
        csr_write(CSR_VSSCRATCH, csr->vsscratch);
        csr_write(CSR_VSEPC, csr->vsepc);
        csr_write(CSR_VSCAUSE, csr->vscause);
        csr_write(CSR_VSTVAL, csr->vstval);
        csr_write(CSR_HVIP, csr->hvip);
        csr_write(CSR_VSATP, csr->vsatp);

        kvm_riscv_vcpu_update_config(vcpu->arch.isa);

        kvm_riscv_gstage_update_hgatp(vcpu);

        kvm_riscv_vcpu_timer_restore(vcpu);

        kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
        kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
                                        vcpu->arch.isa);

        kvm_riscv_vcpu_aia_load(vcpu, cpu);

        vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        vcpu->cpu = -1;

        kvm_riscv_vcpu_aia_put(vcpu);

        kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
                                     vcpu->arch.isa);
        kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

        kvm_riscv_vcpu_timer_save(vcpu);

        csr->vsstatus = csr_read(CSR_VSSTATUS);
        csr->vsie = csr_read(CSR_VSIE);
        csr->vstvec = csr_read(CSR_VSTVEC);
        csr->vsscratch = csr_read(CSR_VSSCRATCH);
        csr->vsepc = csr_read(CSR_VSEPC);
        csr->vscause = csr_read(CSR_VSCAUSE);
        csr->vstval = csr_read(CSR_VSTVAL);
        csr->hvip = csr_read(CSR_HVIP);
        csr->vsatp = csr_read(CSR_VSATP);
}

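/*
 * Handle pending VCPU requests before entering the guest: sleep until
 * powered on and unpaused, reset the VCPU, update HGATP, and process
 * FENCE.I / HFENCE TLB-flush requests.
 */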
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
        struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

        if (kvm_request_pending(vcpu)) {
                if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
                        kvm_vcpu_srcu_read_unlock(vcpu);
                        rcuwait_wait_event(wait,
                                (!vcpu->arch.power_off) && (!vcpu->arch.pause),
                                TASK_INTERRUPTIBLE);
                        kvm_vcpu_srcu_read_lock(vcpu);

                        if (vcpu->arch.power_off || vcpu->arch.pause) {
                                /*
                                 * Awakened to handle a signal; request to
                                 * sleep again later.
                                 */
                                kvm_make_request(KVM_REQ_SLEEP, vcpu);
                        }
                }

                if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
                        kvm_riscv_reset_vcpu(vcpu);

                if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
                        kvm_riscv_gstage_update_hgatp(vcpu);

                if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
                        kvm_riscv_fence_i_process(vcpu);

                /*
                 * The generic KVM_REQ_TLB_FLUSH is the same as
                 * KVM_REQ_HFENCE_GVMA_VMID_ALL.
                 */
                if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
                        kvm_riscv_hfence_gvma_vmid_all_process(vcpu);

                if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
                        kvm_riscv_hfence_vvma_all_process(vcpu);

                if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
                        kvm_riscv_hfence_process(vcpu);
        }
}

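/*
 * Write the shadow HVIP to hardware and let the AIA code update its part
 * of the guest interrupt state.
 */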
static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        csr_write(CSR_HVIP, csr->hvip);
        kvm_riscv_vcpu_aia_update_hvip(vcpu);
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
        guest_state_enter_irqoff();
        __kvm_riscv_switch_to(&vcpu->arch);
        vcpu->arch.last_exit_cpu = vcpu->cpu;
        guest_state_exit_irqoff();
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
        int ret;
        struct kvm_cpu_trap trap;
        struct kvm_run *run = vcpu->run;

        /* Mark this VCPU ran at least once */
        vcpu->arch.ran_atleast_once = true;

        kvm_vcpu_srcu_read_lock(vcpu);

        switch (run->exit_reason) {
        case KVM_EXIT_MMIO:
                /* Process MMIO value returned from user-space */
                ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
                break;
        case KVM_EXIT_RISCV_SBI:
                /* Process SBI value returned from user-space */
                ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
                break;
        case KVM_EXIT_RISCV_CSR:
                /* Process CSR value returned from user-space */
                ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
                break;
        default:
                ret = 0;
                break;
        }
        if (ret) {
                kvm_vcpu_srcu_read_unlock(vcpu);
                return ret;
        }

        if (run->immediate_exit) {
                kvm_vcpu_srcu_read_unlock(vcpu);
                return -EINTR;
        }

        vcpu_load(vcpu);

        kvm_sigset_activate(vcpu);

        ret = 1;
        run->exit_reason = KVM_EXIT_UNKNOWN;
        while (ret > 0) {
                /* Check conditions before entering the guest */
                ret = xfer_to_guest_mode_handle_work(vcpu);
                if (ret)
                        continue;
                ret = 1;

                kvm_riscv_gstage_vmid_update(vcpu);

                kvm_riscv_check_vcpu_requests(vcpu);

                preempt_disable();

                /* Update AIA HW state before entering guest */
                ret = kvm_riscv_vcpu_aia_update(vcpu);
                if (ret <= 0) {
                        preempt_enable();
                        continue;
                }

                local_irq_disable();

                /*
                 * Ensure we set mode to IN_GUEST_MODE after we disable
                 * interrupts and before the final VCPU requests check.
                 * See the comment in kvm_vcpu_exiting_guest_mode() and
                 * Documentation/virt/kvm/vcpu-requests.rst
                 */
                vcpu->mode = IN_GUEST_MODE;

                kvm_vcpu_srcu_read_unlock(vcpu);
                smp_mb__after_srcu_read_unlock();

                /*
                 * VCPU interrupts might have been updated asynchronously,
                 * so update them in the HW.
                 */
                kvm_riscv_vcpu_flush_interrupts(vcpu);

                /* Update HVIP CSR for current CPU */
                kvm_riscv_update_hvip(vcpu);

                if (ret <= 0 ||
                    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
                    kvm_request_pending(vcpu) ||
                    xfer_to_guest_mode_work_pending()) {
                        vcpu->mode = OUTSIDE_GUEST_MODE;
                        local_irq_enable();
                        preempt_enable();
                        kvm_vcpu_srcu_read_lock(vcpu);
                        continue;
                }

                /*
                 * Cleanup stale TLB entries
                 *
                 * Note: This should be done after the G-stage VMID has been
                 * updated using kvm_riscv_gstage_vmid_ver_changed()
                 */
                kvm_riscv_local_tlb_sanitize(vcpu);

                guest_timing_enter_irqoff();

                kvm_riscv_vcpu_enter_exit(vcpu);

                vcpu->mode = OUTSIDE_GUEST_MODE;
                vcpu->stat.exits++;

                /*
                 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
                 * get an interrupt between __kvm_riscv_switch_to() and
                 * local_irq_enable() which can potentially change CSRs.
                 */
                trap.sepc = vcpu->arch.guest_context.sepc;
                trap.scause = csr_read(CSR_SCAUSE);
                trap.stval = csr_read(CSR_STVAL);
                trap.htval = csr_read(CSR_HTVAL);
                trap.htinst = csr_read(CSR_HTINST);

                /* Sync up the interrupt state with HW */
                kvm_riscv_vcpu_sync_interrupts(vcpu);

                /*
                 * We must ensure that any pending interrupts are taken before
                 * we exit guest timing so that timer ticks are accounted as
                 * guest time. Transiently unmask interrupts so that any
                 * pending interrupts are taken.
                 *
                 * There's no barrier which ensures that pending interrupts are
                 * recognised, so we just hope that the CPU takes any pending
                 * interrupts between the enable and disable.
                 */
                local_irq_enable();
                local_irq_disable();

                guest_timing_exit_irqoff();

                local_irq_enable();

                preempt_enable();

                kvm_vcpu_srcu_read_lock(vcpu);

                ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
        }

        kvm_sigset_deactivate(vcpu);

        vcpu_put(vcpu);

        kvm_vcpu_srcu_read_unlock(vcpu);

        return ret;
}