/*
 * RISC-V implementation of KVM hooks
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qapi/visitor.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "hw/core/accel-cpu.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/intc/riscv_imsic.h"
#include "hw/loader.h"
#include "kvm_riscv.h"
#include "sbi_ecall_interface.h"
#include "chardev/char-fe.h"
#include "migration/migration.h"
#include "sysemu/runstate.h"
#include "hw/riscv/numa.h"
50 void riscv_kvm_aplic_request(void *opaque
, int irq
, int level
)
52 kvm_set_irq(kvm_state
, irq
, !!level
);
55 static bool cap_has_mp_state
;
57 static uint64_t kvm_riscv_reg_id(CPURISCVState
*env
, uint64_t type
,
60 uint64_t id
= KVM_REG_RISCV
| type
| idx
;
62 switch (riscv_cpu_mxl(env
)) {
64 id
|= KVM_REG_SIZE_U32
;
67 id
|= KVM_REG_SIZE_U64
;
70 g_assert_not_reached();
75 #define RISCV_CORE_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \
76 KVM_REG_RISCV_CORE_REG(name))
78 #define RISCV_CSR_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
79 KVM_REG_RISCV_CSR_REG(name))
81 #define RISCV_TIMER_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_TIMER, \
82 KVM_REG_RISCV_TIMER_REG(name))
84 #define RISCV_FP_F_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)
86 #define RISCV_FP_D_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)
88 #define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
90 int _ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), ®); \
96 #define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
98 int _ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), ®); \
104 #define KVM_RISCV_GET_TIMER(cs, env, name, reg) \
106 int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(env, name), ®); \
112 #define KVM_RISCV_SET_TIMER(cs, env, name, reg) \
114 int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, name), ®); \
120 typedef struct KVMCPUConfig
{
122 const char *description
;
129 #define KVM_MISA_CFG(_bit, _reg_id) \
130 {.offset = _bit, .kvm_reg_id = _reg_id}
132 /* KVM ISA extensions */
133 static KVMCPUConfig kvm_misa_ext_cfgs
[] = {
134 KVM_MISA_CFG(RVA
, KVM_RISCV_ISA_EXT_A
),
135 KVM_MISA_CFG(RVC
, KVM_RISCV_ISA_EXT_C
),
136 KVM_MISA_CFG(RVD
, KVM_RISCV_ISA_EXT_D
),
137 KVM_MISA_CFG(RVF
, KVM_RISCV_ISA_EXT_F
),
138 KVM_MISA_CFG(RVH
, KVM_RISCV_ISA_EXT_H
),
139 KVM_MISA_CFG(RVI
, KVM_RISCV_ISA_EXT_I
),
140 KVM_MISA_CFG(RVM
, KVM_RISCV_ISA_EXT_M
),
143 static void kvm_cpu_get_misa_ext_cfg(Object
*obj
, Visitor
*v
,
145 void *opaque
, Error
**errp
)
147 KVMCPUConfig
*misa_ext_cfg
= opaque
;
148 target_ulong misa_bit
= misa_ext_cfg
->offset
;
149 RISCVCPU
*cpu
= RISCV_CPU(obj
);
150 CPURISCVState
*env
= &cpu
->env
;
151 bool value
= env
->misa_ext_mask
& misa_bit
;
153 visit_type_bool(v
, name
, &value
, errp
);
156 static void kvm_cpu_set_misa_ext_cfg(Object
*obj
, Visitor
*v
,
158 void *opaque
, Error
**errp
)
160 KVMCPUConfig
*misa_ext_cfg
= opaque
;
161 target_ulong misa_bit
= misa_ext_cfg
->offset
;
162 RISCVCPU
*cpu
= RISCV_CPU(obj
);
163 CPURISCVState
*env
= &cpu
->env
;
164 bool value
, host_bit
;
166 if (!visit_type_bool(v
, name
, &value
, errp
)) {
170 host_bit
= env
->misa_ext_mask
& misa_bit
;
172 if (value
== host_bit
) {
177 misa_ext_cfg
->user_set
= true;
182 * Forbid users to enable extensions that aren't
183 * available in the hart.
185 error_setg(errp
, "Enabling MISA bit '%s' is not allowed: it's not "
186 "enabled in the host", misa_ext_cfg
->name
);
189 static void kvm_riscv_update_cpu_misa_ext(RISCVCPU
*cpu
, CPUState
*cs
)
191 CPURISCVState
*env
= &cpu
->env
;
195 for (i
= 0; i
< ARRAY_SIZE(kvm_misa_ext_cfgs
); i
++) {
196 KVMCPUConfig
*misa_cfg
= &kvm_misa_ext_cfgs
[i
];
197 target_ulong misa_bit
= misa_cfg
->offset
;
199 if (!misa_cfg
->user_set
) {
203 /* If we're here we're going to disable the MISA bit */
205 id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_ISA_EXT
,
206 misa_cfg
->kvm_reg_id
);
207 ret
= kvm_set_one_reg(cs
, id
, ®
);
210 * We're not checking for -EINVAL because if the bit is about
211 * to be disabled, it means that it was already enabled by
212 * KVM. We determined that by fetching the 'isa' register
213 * during init() time. Any error at this point is worth
216 error_report("Unable to set KVM reg %s, error %d",
217 misa_cfg
->name
, ret
);
220 env
->misa_ext
&= ~misa_bit
;
224 #define KVM_EXT_CFG(_name, _prop, _reg_id) \
225 {.name = _name, .offset = CPU_CFG_OFFSET(_prop), \
226 .kvm_reg_id = _reg_id}
228 static KVMCPUConfig kvm_multi_ext_cfgs
[] = {
229 KVM_EXT_CFG("zicbom", ext_zicbom
, KVM_RISCV_ISA_EXT_ZICBOM
),
230 KVM_EXT_CFG("zicboz", ext_zicboz
, KVM_RISCV_ISA_EXT_ZICBOZ
),
231 KVM_EXT_CFG("zicntr", ext_zicntr
, KVM_RISCV_ISA_EXT_ZICNTR
),
232 KVM_EXT_CFG("zicsr", ext_zicsr
, KVM_RISCV_ISA_EXT_ZICSR
),
233 KVM_EXT_CFG("zifencei", ext_zifencei
, KVM_RISCV_ISA_EXT_ZIFENCEI
),
234 KVM_EXT_CFG("zihintpause", ext_zihintpause
, KVM_RISCV_ISA_EXT_ZIHINTPAUSE
),
235 KVM_EXT_CFG("zihpm", ext_zihpm
, KVM_RISCV_ISA_EXT_ZIHPM
),
236 KVM_EXT_CFG("zba", ext_zba
, KVM_RISCV_ISA_EXT_ZBA
),
237 KVM_EXT_CFG("zbb", ext_zbb
, KVM_RISCV_ISA_EXT_ZBB
),
238 KVM_EXT_CFG("zbs", ext_zbs
, KVM_RISCV_ISA_EXT_ZBS
),
239 KVM_EXT_CFG("ssaia", ext_ssaia
, KVM_RISCV_ISA_EXT_SSAIA
),
240 KVM_EXT_CFG("sstc", ext_sstc
, KVM_RISCV_ISA_EXT_SSTC
),
241 KVM_EXT_CFG("svinval", ext_svinval
, KVM_RISCV_ISA_EXT_SVINVAL
),
242 KVM_EXT_CFG("svnapot", ext_svnapot
, KVM_RISCV_ISA_EXT_SVNAPOT
),
243 KVM_EXT_CFG("svpbmt", ext_svpbmt
, KVM_RISCV_ISA_EXT_SVPBMT
),
246 static void *kvmconfig_get_cfg_addr(RISCVCPU
*cpu
, KVMCPUConfig
*kvmcfg
)
248 return (void *)&cpu
->cfg
+ kvmcfg
->offset
;
251 static void kvm_cpu_cfg_set(RISCVCPU
*cpu
, KVMCPUConfig
*multi_ext
,
254 bool *ext_enabled
= kvmconfig_get_cfg_addr(cpu
, multi_ext
);
259 static uint32_t kvm_cpu_cfg_get(RISCVCPU
*cpu
,
260 KVMCPUConfig
*multi_ext
)
262 bool *ext_enabled
= kvmconfig_get_cfg_addr(cpu
, multi_ext
);
267 static void kvm_cpu_get_multi_ext_cfg(Object
*obj
, Visitor
*v
,
269 void *opaque
, Error
**errp
)
271 KVMCPUConfig
*multi_ext_cfg
= opaque
;
272 RISCVCPU
*cpu
= RISCV_CPU(obj
);
273 bool value
= kvm_cpu_cfg_get(cpu
, multi_ext_cfg
);
275 visit_type_bool(v
, name
, &value
, errp
);
278 static void kvm_cpu_set_multi_ext_cfg(Object
*obj
, Visitor
*v
,
280 void *opaque
, Error
**errp
)
282 KVMCPUConfig
*multi_ext_cfg
= opaque
;
283 RISCVCPU
*cpu
= RISCV_CPU(obj
);
284 bool value
, host_val
;
286 if (!visit_type_bool(v
, name
, &value
, errp
)) {
290 host_val
= kvm_cpu_cfg_get(cpu
, multi_ext_cfg
);
293 * Ignore if the user is setting the same value
296 if (value
== host_val
) {
300 if (!multi_ext_cfg
->supported
) {
302 * Error out if the user is trying to enable an
303 * extension that KVM doesn't support. Ignore
307 error_setg(errp
, "KVM does not support disabling extension %s",
308 multi_ext_cfg
->name
);
314 multi_ext_cfg
->user_set
= true;
315 kvm_cpu_cfg_set(cpu
, multi_ext_cfg
, value
);
318 static KVMCPUConfig kvm_cbom_blocksize
= {
319 .name
= "cbom_blocksize",
320 .offset
= CPU_CFG_OFFSET(cbom_blocksize
),
321 .kvm_reg_id
= KVM_REG_RISCV_CONFIG_REG(zicbom_block_size
)
324 static KVMCPUConfig kvm_cboz_blocksize
= {
325 .name
= "cboz_blocksize",
326 .offset
= CPU_CFG_OFFSET(cboz_blocksize
),
327 .kvm_reg_id
= KVM_REG_RISCV_CONFIG_REG(zicboz_block_size
)
330 static void kvm_cpu_set_cbomz_blksize(Object
*obj
, Visitor
*v
,
332 void *opaque
, Error
**errp
)
334 KVMCPUConfig
*cbomz_cfg
= opaque
;
335 RISCVCPU
*cpu
= RISCV_CPU(obj
);
336 uint16_t value
, *host_val
;
338 if (!visit_type_uint16(v
, name
, &value
, errp
)) {
342 host_val
= kvmconfig_get_cfg_addr(cpu
, cbomz_cfg
);
344 if (value
!= *host_val
) {
345 error_report("Unable to set %s to a different value than "
347 cbomz_cfg
->name
, *host_val
);
351 cbomz_cfg
->user_set
= true;
354 static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU
*cpu
, CPUState
*cs
)
356 CPURISCVState
*env
= &cpu
->env
;
360 for (i
= 0; i
< ARRAY_SIZE(kvm_multi_ext_cfgs
); i
++) {
361 KVMCPUConfig
*multi_ext_cfg
= &kvm_multi_ext_cfgs
[i
];
363 if (!multi_ext_cfg
->user_set
) {
367 id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_ISA_EXT
,
368 multi_ext_cfg
->kvm_reg_id
);
369 reg
= kvm_cpu_cfg_get(cpu
, multi_ext_cfg
);
370 ret
= kvm_set_one_reg(cs
, id
, ®
);
372 error_report("Unable to %s extension %s in KVM, error %d",
373 reg
? "enable" : "disable",
374 multi_ext_cfg
->name
, ret
);
380 static void cpu_get_cfg_unavailable(Object
*obj
, Visitor
*v
,
382 void *opaque
, Error
**errp
)
386 visit_type_bool(v
, name
, &value
, errp
);
389 static void cpu_set_cfg_unavailable(Object
*obj
, Visitor
*v
,
391 void *opaque
, Error
**errp
)
393 const char *propname
= opaque
;
396 if (!visit_type_bool(v
, name
, &value
, errp
)) {
401 error_setg(errp
, "extension %s is not available with KVM",
406 static void riscv_cpu_add_kvm_unavail_prop(Object
*obj
, const char *prop_name
)
408 /* Check if KVM created the property already */
409 if (object_property_find(obj
, prop_name
)) {
414 * Set the default to disabled for every extension
415 * unknown to KVM and error out if the user attempts
416 * to enable any of them.
418 object_property_add(obj
, prop_name
, "bool",
419 cpu_get_cfg_unavailable
,
420 cpu_set_cfg_unavailable
,
421 NULL
, (void *)prop_name
);
424 static void riscv_cpu_add_kvm_unavail_prop_array(Object
*obj
,
425 const RISCVCPUMultiExtConfig
*array
)
427 const RISCVCPUMultiExtConfig
*prop
;
431 for (prop
= array
; prop
&& prop
->name
; prop
++) {
432 riscv_cpu_add_kvm_unavail_prop(obj
, prop
->name
);
436 static void kvm_riscv_add_cpu_user_properties(Object
*cpu_obj
)
440 riscv_add_satp_mode_properties(cpu_obj
);
442 for (i
= 0; i
< ARRAY_SIZE(kvm_misa_ext_cfgs
); i
++) {
443 KVMCPUConfig
*misa_cfg
= &kvm_misa_ext_cfgs
[i
];
444 int bit
= misa_cfg
->offset
;
446 misa_cfg
->name
= riscv_get_misa_ext_name(bit
);
447 misa_cfg
->description
= riscv_get_misa_ext_description(bit
);
449 object_property_add(cpu_obj
, misa_cfg
->name
, "bool",
450 kvm_cpu_get_misa_ext_cfg
,
451 kvm_cpu_set_misa_ext_cfg
,
453 object_property_set_description(cpu_obj
, misa_cfg
->name
,
454 misa_cfg
->description
);
457 for (i
= 0; misa_bits
[i
] != 0; i
++) {
458 const char *ext_name
= riscv_get_misa_ext_name(misa_bits
[i
]);
459 riscv_cpu_add_kvm_unavail_prop(cpu_obj
, ext_name
);
462 for (i
= 0; i
< ARRAY_SIZE(kvm_multi_ext_cfgs
); i
++) {
463 KVMCPUConfig
*multi_cfg
= &kvm_multi_ext_cfgs
[i
];
465 object_property_add(cpu_obj
, multi_cfg
->name
, "bool",
466 kvm_cpu_get_multi_ext_cfg
,
467 kvm_cpu_set_multi_ext_cfg
,
471 object_property_add(cpu_obj
, "cbom_blocksize", "uint16",
472 NULL
, kvm_cpu_set_cbomz_blksize
,
473 NULL
, &kvm_cbom_blocksize
);
475 object_property_add(cpu_obj
, "cboz_blocksize", "uint16",
476 NULL
, kvm_cpu_set_cbomz_blksize
,
477 NULL
, &kvm_cboz_blocksize
);
479 riscv_cpu_add_kvm_unavail_prop_array(cpu_obj
, riscv_cpu_extensions
);
480 riscv_cpu_add_kvm_unavail_prop_array(cpu_obj
, riscv_cpu_vendor_exts
);
481 riscv_cpu_add_kvm_unavail_prop_array(cpu_obj
, riscv_cpu_experimental_exts
);
484 static int kvm_riscv_get_regs_core(CPUState
*cs
)
489 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
491 ret
= kvm_get_one_reg(cs
, RISCV_CORE_REG(env
, regs
.pc
), ®
);
497 for (i
= 1; i
< 32; i
++) {
498 uint64_t id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CORE
, i
);
499 ret
= kvm_get_one_reg(cs
, id
, ®
);
509 static int kvm_riscv_put_regs_core(CPUState
*cs
)
514 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
517 ret
= kvm_set_one_reg(cs
, RISCV_CORE_REG(env
, regs
.pc
), ®
);
522 for (i
= 1; i
< 32; i
++) {
523 uint64_t id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CORE
, i
);
525 ret
= kvm_set_one_reg(cs
, id
, ®
);
534 static int kvm_riscv_get_regs_csr(CPUState
*cs
)
536 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
538 KVM_RISCV_GET_CSR(cs
, env
, sstatus
, env
->mstatus
);
539 KVM_RISCV_GET_CSR(cs
, env
, sie
, env
->mie
);
540 KVM_RISCV_GET_CSR(cs
, env
, stvec
, env
->stvec
);
541 KVM_RISCV_GET_CSR(cs
, env
, sscratch
, env
->sscratch
);
542 KVM_RISCV_GET_CSR(cs
, env
, sepc
, env
->sepc
);
543 KVM_RISCV_GET_CSR(cs
, env
, scause
, env
->scause
);
544 KVM_RISCV_GET_CSR(cs
, env
, stval
, env
->stval
);
545 KVM_RISCV_GET_CSR(cs
, env
, sip
, env
->mip
);
546 KVM_RISCV_GET_CSR(cs
, env
, satp
, env
->satp
);
551 static int kvm_riscv_put_regs_csr(CPUState
*cs
)
553 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
555 KVM_RISCV_SET_CSR(cs
, env
, sstatus
, env
->mstatus
);
556 KVM_RISCV_SET_CSR(cs
, env
, sie
, env
->mie
);
557 KVM_RISCV_SET_CSR(cs
, env
, stvec
, env
->stvec
);
558 KVM_RISCV_SET_CSR(cs
, env
, sscratch
, env
->sscratch
);
559 KVM_RISCV_SET_CSR(cs
, env
, sepc
, env
->sepc
);
560 KVM_RISCV_SET_CSR(cs
, env
, scause
, env
->scause
);
561 KVM_RISCV_SET_CSR(cs
, env
, stval
, env
->stval
);
562 KVM_RISCV_SET_CSR(cs
, env
, sip
, env
->mip
);
563 KVM_RISCV_SET_CSR(cs
, env
, satp
, env
->satp
);
568 static int kvm_riscv_get_regs_fp(CPUState
*cs
)
572 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
574 if (riscv_has_ext(env
, RVD
)) {
576 for (i
= 0; i
< 32; i
++) {
577 ret
= kvm_get_one_reg(cs
, RISCV_FP_D_REG(env
, i
), ®
);
586 if (riscv_has_ext(env
, RVF
)) {
588 for (i
= 0; i
< 32; i
++) {
589 ret
= kvm_get_one_reg(cs
, RISCV_FP_F_REG(env
, i
), ®
);
601 static int kvm_riscv_put_regs_fp(CPUState
*cs
)
605 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
607 if (riscv_has_ext(env
, RVD
)) {
609 for (i
= 0; i
< 32; i
++) {
611 ret
= kvm_set_one_reg(cs
, RISCV_FP_D_REG(env
, i
), ®
);
619 if (riscv_has_ext(env
, RVF
)) {
621 for (i
= 0; i
< 32; i
++) {
623 ret
= kvm_set_one_reg(cs
, RISCV_FP_F_REG(env
, i
), ®
);
634 static void kvm_riscv_get_regs_timer(CPUState
*cs
)
636 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
638 if (env
->kvm_timer_dirty
) {
642 KVM_RISCV_GET_TIMER(cs
, env
, time
, env
->kvm_timer_time
);
643 KVM_RISCV_GET_TIMER(cs
, env
, compare
, env
->kvm_timer_compare
);
644 KVM_RISCV_GET_TIMER(cs
, env
, state
, env
->kvm_timer_state
);
645 KVM_RISCV_GET_TIMER(cs
, env
, frequency
, env
->kvm_timer_frequency
);
647 env
->kvm_timer_dirty
= true;
650 static void kvm_riscv_put_regs_timer(CPUState
*cs
)
653 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
655 if (!env
->kvm_timer_dirty
) {
659 KVM_RISCV_SET_TIMER(cs
, env
, time
, env
->kvm_timer_time
);
660 KVM_RISCV_SET_TIMER(cs
, env
, compare
, env
->kvm_timer_compare
);
663 * To set register of RISCV_TIMER_REG(state) will occur a error from KVM
664 * on env->kvm_timer_state == 0, It's better to adapt in KVM, but it
665 * doesn't matter that adaping in QEMU now.
666 * TODO If KVM changes, adapt here.
668 if (env
->kvm_timer_state
) {
669 KVM_RISCV_SET_TIMER(cs
, env
, state
, env
->kvm_timer_state
);
673 * For now, migration will not work between Hosts with different timer
674 * frequency. Therefore, we should check whether they are the same here
675 * during the migration.
677 if (migration_is_running(migrate_get_current()->state
)) {
678 KVM_RISCV_GET_TIMER(cs
, env
, frequency
, reg
);
679 if (reg
!= env
->kvm_timer_frequency
) {
680 error_report("Dst Hosts timer frequency != Src Hosts");
684 env
->kvm_timer_dirty
= false;
687 typedef struct KVMScratchCPU
{
694 * Heavily inspired by kvm_arm_create_scratch_host_vcpu()
695 * from target/arm/kvm.c.
697 static bool kvm_riscv_create_scratch_vcpu(KVMScratchCPU
*scratch
)
699 int kvmfd
= -1, vmfd
= -1, cpufd
= -1;
701 kvmfd
= qemu_open_old("/dev/kvm", O_RDWR
);
706 vmfd
= ioctl(kvmfd
, KVM_CREATE_VM
, 0);
707 } while (vmfd
== -1 && errno
== EINTR
);
711 cpufd
= ioctl(vmfd
, KVM_CREATE_VCPU
, 0);
716 scratch
->kvmfd
= kvmfd
;
717 scratch
->vmfd
= vmfd
;
718 scratch
->cpufd
= cpufd
;
736 static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU
*scratch
)
738 close(scratch
->cpufd
);
739 close(scratch
->vmfd
);
740 close(scratch
->kvmfd
);
743 static void kvm_riscv_init_machine_ids(RISCVCPU
*cpu
, KVMScratchCPU
*kvmcpu
)
745 CPURISCVState
*env
= &cpu
->env
;
746 struct kvm_one_reg reg
;
749 reg
.id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
750 KVM_REG_RISCV_CONFIG_REG(mvendorid
));
751 reg
.addr
= (uint64_t)&cpu
->cfg
.mvendorid
;
752 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_ONE_REG
, ®
);
754 error_report("Unable to retrieve mvendorid from host, error %d", ret
);
757 reg
.id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
758 KVM_REG_RISCV_CONFIG_REG(marchid
));
759 reg
.addr
= (uint64_t)&cpu
->cfg
.marchid
;
760 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_ONE_REG
, ®
);
762 error_report("Unable to retrieve marchid from host, error %d", ret
);
765 reg
.id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
766 KVM_REG_RISCV_CONFIG_REG(mimpid
));
767 reg
.addr
= (uint64_t)&cpu
->cfg
.mimpid
;
768 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_ONE_REG
, ®
);
770 error_report("Unable to retrieve mimpid from host, error %d", ret
);
774 static void kvm_riscv_init_misa_ext_mask(RISCVCPU
*cpu
,
775 KVMScratchCPU
*kvmcpu
)
777 CPURISCVState
*env
= &cpu
->env
;
778 struct kvm_one_reg reg
;
781 reg
.id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
782 KVM_REG_RISCV_CONFIG_REG(isa
));
783 reg
.addr
= (uint64_t)&env
->misa_ext_mask
;
784 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_ONE_REG
, ®
);
787 error_report("Unable to fetch ISA register from KVM, "
789 kvm_riscv_destroy_scratch_vcpu(kvmcpu
);
793 env
->misa_ext
= env
->misa_ext_mask
;
796 static void kvm_riscv_read_cbomz_blksize(RISCVCPU
*cpu
, KVMScratchCPU
*kvmcpu
,
797 KVMCPUConfig
*cbomz_cfg
)
799 CPURISCVState
*env
= &cpu
->env
;
800 struct kvm_one_reg reg
;
803 reg
.id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
804 cbomz_cfg
->kvm_reg_id
);
805 reg
.addr
= (uint64_t)kvmconfig_get_cfg_addr(cpu
, cbomz_cfg
);
806 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_ONE_REG
, ®
);
808 error_report("Unable to read KVM reg %s, error %d",
809 cbomz_cfg
->name
, ret
);
814 static void kvm_riscv_read_multiext_legacy(RISCVCPU
*cpu
,
815 KVMScratchCPU
*kvmcpu
)
817 CPURISCVState
*env
= &cpu
->env
;
821 for (i
= 0; i
< ARRAY_SIZE(kvm_multi_ext_cfgs
); i
++) {
822 KVMCPUConfig
*multi_ext_cfg
= &kvm_multi_ext_cfgs
[i
];
823 struct kvm_one_reg reg
;
825 reg
.id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_ISA_EXT
,
826 multi_ext_cfg
->kvm_reg_id
);
827 reg
.addr
= (uint64_t)&val
;
828 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_ONE_REG
, ®
);
830 if (errno
== EINVAL
) {
831 /* Silently default to 'false' if KVM does not support it. */
832 multi_ext_cfg
->supported
= false;
835 error_report("Unable to read ISA_EXT KVM register %s: %s",
836 multi_ext_cfg
->name
, strerror(errno
));
840 multi_ext_cfg
->supported
= true;
843 kvm_cpu_cfg_set(cpu
, multi_ext_cfg
, val
);
846 if (cpu
->cfg
.ext_zicbom
) {
847 kvm_riscv_read_cbomz_blksize(cpu
, kvmcpu
, &kvm_cbom_blocksize
);
850 if (cpu
->cfg
.ext_zicboz
) {
851 kvm_riscv_read_cbomz_blksize(cpu
, kvmcpu
, &kvm_cboz_blocksize
);
855 static int uint64_cmp(const void *a
, const void *b
)
857 uint64_t val1
= *(const uint64_t *)a
;
858 uint64_t val2
= *(const uint64_t *)b
;
871 static void kvm_riscv_init_multiext_cfg(RISCVCPU
*cpu
, KVMScratchCPU
*kvmcpu
)
873 KVMCPUConfig
*multi_ext_cfg
;
874 struct kvm_one_reg reg
;
875 struct kvm_reg_list rl_struct
;
876 struct kvm_reg_list
*reglist
;
877 uint64_t val
, reg_id
, *reg_search
;
881 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_REG_LIST
, &rl_struct
);
884 * If KVM_GET_REG_LIST isn't supported we'll get errno 22
885 * (EINVAL). Use read_legacy() in this case.
887 if (errno
== EINVAL
) {
888 return kvm_riscv_read_multiext_legacy(cpu
, kvmcpu
);
889 } else if (errno
!= E2BIG
) {
891 * E2BIG is an expected error message for the API since we
892 * don't know the number of registers. The right amount will
893 * be written in rl_struct.n.
895 * Error out if we get any other errno.
897 error_report("Error when accessing get-reg-list: %s",
902 reglist
= g_malloc(sizeof(struct kvm_reg_list
) +
903 rl_struct
.n
* sizeof(uint64_t));
904 reglist
->n
= rl_struct
.n
;
905 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_REG_LIST
, reglist
);
907 error_report("Error when reading KVM_GET_REG_LIST: %s",
912 /* sort reglist to use bsearch() */
913 qsort(®list
->reg
, reglist
->n
, sizeof(uint64_t), uint64_cmp
);
915 for (i
= 0; i
< ARRAY_SIZE(kvm_multi_ext_cfgs
); i
++) {
916 multi_ext_cfg
= &kvm_multi_ext_cfgs
[i
];
917 reg_id
= kvm_riscv_reg_id(&cpu
->env
, KVM_REG_RISCV_ISA_EXT
,
918 multi_ext_cfg
->kvm_reg_id
);
919 reg_search
= bsearch(®_id
, reglist
->reg
, reglist
->n
,
920 sizeof(uint64_t), uint64_cmp
);
926 reg
.addr
= (uint64_t)&val
;
927 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_ONE_REG
, ®
);
929 error_report("Unable to read ISA_EXT KVM register %s: %s",
930 multi_ext_cfg
->name
, strerror(errno
));
934 multi_ext_cfg
->supported
= true;
935 kvm_cpu_cfg_set(cpu
, multi_ext_cfg
, val
);
938 if (cpu
->cfg
.ext_zicbom
) {
939 kvm_riscv_read_cbomz_blksize(cpu
, kvmcpu
, &kvm_cbom_blocksize
);
942 if (cpu
->cfg
.ext_zicboz
) {
943 kvm_riscv_read_cbomz_blksize(cpu
, kvmcpu
, &kvm_cboz_blocksize
);
947 static void riscv_init_kvm_registers(Object
*cpu_obj
)
949 RISCVCPU
*cpu
= RISCV_CPU(cpu_obj
);
950 KVMScratchCPU kvmcpu
;
952 if (!kvm_riscv_create_scratch_vcpu(&kvmcpu
)) {
956 kvm_riscv_init_machine_ids(cpu
, &kvmcpu
);
957 kvm_riscv_init_misa_ext_mask(cpu
, &kvmcpu
);
958 kvm_riscv_init_multiext_cfg(cpu
, &kvmcpu
);
960 kvm_riscv_destroy_scratch_vcpu(&kvmcpu
);
963 const KVMCapabilityInfo kvm_arch_required_capabilities
[] = {
967 int kvm_arch_get_registers(CPUState
*cs
)
971 ret
= kvm_riscv_get_regs_core(cs
);
976 ret
= kvm_riscv_get_regs_csr(cs
);
981 ret
= kvm_riscv_get_regs_fp(cs
);
989 int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU
*cpu
, int state
)
991 if (cap_has_mp_state
) {
992 struct kvm_mp_state mp_state
= {
996 int ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_MP_STATE
, &mp_state
);
998 fprintf(stderr
, "%s: failed to sync MP_STATE %d/%s\n",
999 __func__
, ret
, strerror(-ret
));
1007 int kvm_arch_put_registers(CPUState
*cs
, int level
)
1011 ret
= kvm_riscv_put_regs_core(cs
);
1016 ret
= kvm_riscv_put_regs_csr(cs
);
1021 ret
= kvm_riscv_put_regs_fp(cs
);
1026 if (KVM_PUT_RESET_STATE
== level
) {
1027 RISCVCPU
*cpu
= RISCV_CPU(cs
);
1028 if (cs
->cpu_index
== 0) {
1029 ret
= kvm_riscv_sync_mpstate_to_kvm(cpu
, KVM_MP_STATE_RUNNABLE
);
1031 ret
= kvm_riscv_sync_mpstate_to_kvm(cpu
, KVM_MP_STATE_STOPPED
);
1041 int kvm_arch_release_virq_post(int virq
)
1046 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry
*route
,
1047 uint64_t address
, uint32_t data
, PCIDevice
*dev
)
1052 int kvm_arch_destroy_vcpu(CPUState
*cs
)
1057 unsigned long kvm_arch_vcpu_id(CPUState
*cpu
)
1059 return cpu
->cpu_index
;
1062 static void kvm_riscv_vm_state_change(void *opaque
, bool running
,
1065 CPUState
*cs
= opaque
;
1068 kvm_riscv_put_regs_timer(cs
);
1070 kvm_riscv_get_regs_timer(cs
);
1074 void kvm_arch_init_irq_routing(KVMState
*s
)
1078 static int kvm_vcpu_set_machine_ids(RISCVCPU
*cpu
, CPUState
*cs
)
1080 CPURISCVState
*env
= &cpu
->env
;
1085 id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
1086 KVM_REG_RISCV_CONFIG_REG(mvendorid
));
1088 * cfg.mvendorid is an uint32 but a target_ulong will
1089 * be written. Assign it to a target_ulong var to avoid
1090 * writing pieces of other cpu->cfg fields in the reg.
1092 reg
= cpu
->cfg
.mvendorid
;
1093 ret
= kvm_set_one_reg(cs
, id
, ®
);
1098 id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
1099 KVM_REG_RISCV_CONFIG_REG(marchid
));
1100 ret
= kvm_set_one_reg(cs
, id
, &cpu
->cfg
.marchid
);
1105 id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
1106 KVM_REG_RISCV_CONFIG_REG(mimpid
));
1107 ret
= kvm_set_one_reg(cs
, id
, &cpu
->cfg
.mimpid
);
1112 int kvm_arch_init_vcpu(CPUState
*cs
)
1115 RISCVCPU
*cpu
= RISCV_CPU(cs
);
1117 qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change
, cs
);
1119 if (!object_dynamic_cast(OBJECT(cpu
), TYPE_RISCV_CPU_HOST
)) {
1120 ret
= kvm_vcpu_set_machine_ids(cpu
, cs
);
1126 kvm_riscv_update_cpu_misa_ext(cpu
, cs
);
1127 kvm_riscv_update_cpu_cfg_isa_ext(cpu
, cs
);
1132 int kvm_arch_msi_data_to_gsi(uint32_t data
)
1137 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry
*route
,
1138 int vector
, PCIDevice
*dev
)
1143 int kvm_arch_get_default_type(MachineState
*ms
)
1148 int kvm_arch_init(MachineState
*ms
, KVMState
*s
)
1150 cap_has_mp_state
= kvm_check_extension(s
, KVM_CAP_MP_STATE
);
1154 int kvm_arch_irqchip_create(KVMState
*s
)
1156 if (kvm_kernel_irqchip_split()) {
1157 error_report("-machine kernel_irqchip=split is not supported on RISC-V.");
1162 * We can create the VAIA using the newer device control API.
1164 return kvm_check_extension(s
, KVM_CAP_DEVICE_CTRL
);
1167 int kvm_arch_process_async_events(CPUState
*cs
)
1172 void kvm_arch_pre_run(CPUState
*cs
, struct kvm_run
*run
)
1176 MemTxAttrs
kvm_arch_post_run(CPUState
*cs
, struct kvm_run
*run
)
1178 return MEMTXATTRS_UNSPECIFIED
;
1181 bool kvm_arch_stop_on_emulation_error(CPUState
*cs
)
1186 static int kvm_riscv_handle_sbi(CPUState
*cs
, struct kvm_run
*run
)
1190 switch (run
->riscv_sbi
.extension_id
) {
1191 case SBI_EXT_0_1_CONSOLE_PUTCHAR
:
1192 ch
= run
->riscv_sbi
.args
[0];
1193 qemu_chr_fe_write(serial_hd(0)->be
, &ch
, sizeof(ch
));
1195 case SBI_EXT_0_1_CONSOLE_GETCHAR
:
1196 ret
= qemu_chr_fe_read_all(serial_hd(0)->be
, &ch
, sizeof(ch
));
1197 if (ret
== sizeof(ch
)) {
1198 run
->riscv_sbi
.ret
[0] = ch
;
1200 run
->riscv_sbi
.ret
[0] = -1;
1205 qemu_log_mask(LOG_UNIMP
,
1206 "%s: un-handled SBI EXIT, specific reasons is %lu\n",
1207 __func__
, run
->riscv_sbi
.extension_id
);
1214 int kvm_arch_handle_exit(CPUState
*cs
, struct kvm_run
*run
)
1217 switch (run
->exit_reason
) {
1218 case KVM_EXIT_RISCV_SBI
:
1219 ret
= kvm_riscv_handle_sbi(cs
, run
);
1222 qemu_log_mask(LOG_UNIMP
, "%s: un-handled exit reason %d\n",
1223 __func__
, run
->exit_reason
);
1230 void kvm_riscv_reset_vcpu(RISCVCPU
*cpu
)
1232 CPURISCVState
*env
= &cpu
->env
;
1235 if (!kvm_enabled()) {
1238 for (i
= 0; i
< 32; i
++) {
1241 env
->pc
= cpu
->env
.kernel_addr
;
1242 env
->gpr
[10] = kvm_arch_vcpu_id(CPU(cpu
)); /* a0 */
1243 env
->gpr
[11] = cpu
->env
.fdt_addr
; /* a1 */
1254 void kvm_riscv_set_irq(RISCVCPU
*cpu
, int irq
, int level
)
1257 unsigned virq
= level
? KVM_INTERRUPT_SET
: KVM_INTERRUPT_UNSET
;
1259 if (irq
!= IRQ_S_EXT
) {
1260 perror("kvm riscv set irq != IRQ_S_EXT\n");
1264 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_INTERRUPT
, &virq
);
1266 perror("Set irq failed");
1271 bool kvm_arch_cpu_check_are_resettable(void)
1276 static int aia_mode
;
1278 static const char *kvm_aia_mode_str(uint64_t mode
)
1281 case KVM_DEV_RISCV_AIA_MODE_EMUL
:
1283 case KVM_DEV_RISCV_AIA_MODE_HWACCEL
:
1285 case KVM_DEV_RISCV_AIA_MODE_AUTO
:
1291 static char *riscv_get_kvm_aia(Object
*obj
, Error
**errp
)
1293 return g_strdup(kvm_aia_mode_str(aia_mode
));
1296 static void riscv_set_kvm_aia(Object
*obj
, const char *val
, Error
**errp
)
1298 if (!strcmp(val
, "emul")) {
1299 aia_mode
= KVM_DEV_RISCV_AIA_MODE_EMUL
;
1300 } else if (!strcmp(val
, "hwaccel")) {
1301 aia_mode
= KVM_DEV_RISCV_AIA_MODE_HWACCEL
;
1302 } else if (!strcmp(val
, "auto")) {
1303 aia_mode
= KVM_DEV_RISCV_AIA_MODE_AUTO
;
1305 error_setg(errp
, "Invalid KVM AIA mode");
1306 error_append_hint(errp
, "Valid values are emul, hwaccel, and auto.\n");
1310 void kvm_arch_accel_class_init(ObjectClass
*oc
)
1312 object_class_property_add_str(oc
, "riscv-aia", riscv_get_kvm_aia
,
1314 object_class_property_set_description(oc
, "riscv-aia",
1315 "Set KVM AIA mode. Valid values are "
1316 "emul, hwaccel, and auto. Default "
1318 object_property_set_default_str(object_class_property_find(oc
, "riscv-aia"),
1322 void kvm_riscv_aia_create(MachineState
*machine
, uint64_t group_shift
,
1323 uint64_t aia_irq_num
, uint64_t aia_msi_num
,
1324 uint64_t aplic_base
, uint64_t imsic_base
,
1329 uint64_t default_aia_mode
;
1330 uint64_t socket_count
= riscv_socket_count(machine
);
1331 uint64_t max_hart_per_socket
= 0;
1332 uint64_t socket
, base_hart
, hart_count
, socket_imsic_base
, imsic_addr
;
1333 uint64_t socket_bits
, hart_bits
, guest_bits
;
1335 aia_fd
= kvm_create_device(kvm_state
, KVM_DEV_TYPE_RISCV_AIA
, false);
1338 error_report("Unable to create in-kernel irqchip");
1342 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1343 KVM_DEV_RISCV_AIA_CONFIG_MODE
,
1344 &default_aia_mode
, false, NULL
);
1346 error_report("KVM AIA: failed to get current KVM AIA mode");
1349 qemu_log("KVM AIA: default mode is %s\n",
1350 kvm_aia_mode_str(default_aia_mode
));
1352 if (default_aia_mode
!= aia_mode
) {
1353 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1354 KVM_DEV_RISCV_AIA_CONFIG_MODE
,
1355 &aia_mode
, true, NULL
);
1357 warn_report("KVM AIA: failed to set KVM AIA mode");
1359 qemu_log("KVM AIA: set current mode to %s\n",
1360 kvm_aia_mode_str(aia_mode
));
1363 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1364 KVM_DEV_RISCV_AIA_CONFIG_SRCS
,
1365 &aia_irq_num
, true, NULL
);
1367 error_report("KVM AIA: failed to set number of input irq lines");
1371 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1372 KVM_DEV_RISCV_AIA_CONFIG_IDS
,
1373 &aia_msi_num
, true, NULL
);
1375 error_report("KVM AIA: failed to set number of msi");
1379 socket_bits
= find_last_bit(&socket_count
, BITS_PER_LONG
) + 1;
1380 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1381 KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS
,
1382 &socket_bits
, true, NULL
);
1384 error_report("KVM AIA: failed to set group_bits");
1388 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1389 KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT
,
1390 &group_shift
, true, NULL
);
1392 error_report("KVM AIA: failed to set group_shift");
1396 guest_bits
= guest_num
== 0 ? 0 :
1397 find_last_bit(&guest_num
, BITS_PER_LONG
) + 1;
1398 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1399 KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS
,
1400 &guest_bits
, true, NULL
);
1402 error_report("KVM AIA: failed to set guest_bits");
1406 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_ADDR
,
1407 KVM_DEV_RISCV_AIA_ADDR_APLIC
,
1408 &aplic_base
, true, NULL
);
1410 error_report("KVM AIA: failed to set the base address of APLIC");
1414 for (socket
= 0; socket
< socket_count
; socket
++) {
1415 socket_imsic_base
= imsic_base
+ socket
* (1U << group_shift
);
1416 hart_count
= riscv_socket_hart_count(machine
, socket
);
1417 base_hart
= riscv_socket_first_hartid(machine
, socket
);
1419 if (max_hart_per_socket
< hart_count
) {
1420 max_hart_per_socket
= hart_count
;
1423 for (i
= 0; i
< hart_count
; i
++) {
1424 imsic_addr
= socket_imsic_base
+ i
* IMSIC_HART_SIZE(guest_bits
);
1425 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_ADDR
,
1426 KVM_DEV_RISCV_AIA_ADDR_IMSIC(i
+ base_hart
),
1427 &imsic_addr
, true, NULL
);
1429 error_report("KVM AIA: failed to set the IMSIC address for hart %d", i
);
1435 hart_bits
= find_last_bit(&max_hart_per_socket
, BITS_PER_LONG
) + 1;
1436 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1437 KVM_DEV_RISCV_AIA_CONFIG_HART_BITS
,
1438 &hart_bits
, true, NULL
);
1440 error_report("KVM AIA: failed to set hart_bits");
1444 if (kvm_has_gsi_routing()) {
1445 for (uint64_t idx
= 0; idx
< aia_irq_num
+ 1; ++idx
) {
1446 /* KVM AIA only has one APLIC instance */
1447 kvm_irqchip_add_irq_route(kvm_state
, idx
, 0, idx
);
1449 kvm_gsi_routing_allowed
= true;
1450 kvm_irqchip_commit_routes(kvm_state
);
1453 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CTRL
,
1454 KVM_DEV_RISCV_AIA_CTRL_INIT
,
1457 error_report("KVM AIA: initialized fail");
1461 kvm_msi_via_irqfd_allowed
= true;
1464 static void kvm_cpu_instance_init(CPUState
*cs
)
1466 Object
*obj
= OBJECT(RISCV_CPU(cs
));
1467 DeviceState
*dev
= DEVICE(obj
);
1469 riscv_init_kvm_registers(obj
);
1471 kvm_riscv_add_cpu_user_properties(obj
);
1473 for (Property
*prop
= riscv_cpu_options
; prop
&& prop
->name
; prop
++) {
1474 /* Check if we have a specific KVM handler for the option */
1475 if (object_property_find(obj
, prop
->name
)) {
1478 qdev_property_add_static(dev
, prop
);
1482 static void kvm_cpu_accel_class_init(ObjectClass
*oc
, void *data
)
1484 AccelCPUClass
*acc
= ACCEL_CPU_CLASS(oc
);
1486 acc
->cpu_instance_init
= kvm_cpu_instance_init
;
1489 static const TypeInfo kvm_cpu_accel_type_info
= {
1490 .name
= ACCEL_CPU_NAME("kvm"),
1492 .parent
= TYPE_ACCEL_CPU
,
1493 .class_init
= kvm_cpu_accel_class_init
,
1496 static void kvm_cpu_accel_register_types(void)
1498 type_register_static(&kvm_cpu_accel_type_info
);
1500 type_init(kvm_cpu_accel_register_types
);
1502 static void riscv_host_cpu_init(Object
*obj
)
1504 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
1506 #if defined(TARGET_RISCV32)
1507 env
->misa_mxl_max
= env
->misa_mxl
= MXL_RV32
;
1508 #elif defined(TARGET_RISCV64)
1509 env
->misa_mxl_max
= env
->misa_mxl
= MXL_RV64
;
1513 static const TypeInfo riscv_kvm_cpu_type_infos
[] = {
1515 .name
= TYPE_RISCV_CPU_HOST
,
1516 .parent
= TYPE_RISCV_CPU
,
1517 .instance_init
= riscv_host_cpu_init
,
1521 DEFINE_TYPES(riscv_kvm_cpu_type_infos
)