4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
25 #include "internals.h"
26 #include "exec/exec-all.h"
27 #include "qapi/error.h"
28 #include "qemu/error-report.h"
29 #include "hw/qdev-properties.h"
30 #include "migration/vmstate.h"
31 #include "fpu/softfloat-helpers.h"
32 #include "sysemu/kvm.h"
33 #include "kvm_riscv.h"
/* RISC-V CPU definitions */

/*
 * Single-letter MISA extension characters, in the order they are appended
 * to the ISA string by riscv_isa_string().  Deliberately sized at exactly
 * 26 so there is NO trailing NUL: consumers iterate with sizeof(riscv_exts),
 * never strlen().
 */
static const char riscv_exts[26] = "IEMAFDQCLBJTPVNSUHKORWXYZG";
/* "xN/ABI" display names for the 32 integer registers, indexed by regno */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};
/*
 * Display names for the "high half" of each integer register — the 'h'
 * suffix variants.  NOTE(review): presumably the upper 64 bits when
 * emulating RV128; confirm against the register dump/GDB users.
 */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};
/* "fN/ABI" display names for the 32 floating-point registers */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};
65 static const char * const riscv_excp_names
[] = {
68 "illegal_instruction",
86 "guest_exec_page_fault",
87 "guest_load_page_fault",
89 "guest_store_page_fault",
92 static const char * const riscv_intr_names
[] = {
/*
 * Return a human-readable name for trap @cause.  @async selects the
 * interrupt name table over the exception name table.  Causes beyond
 * the table bounds yield "(unknown)".
 */
const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
{
    if (async) {
        return (cause < ARRAY_SIZE(riscv_intr_names)) ?
            riscv_intr_names[cause] : "(unknown)";
    } else {
        return (cause < ARRAY_SIZE(riscv_excp_names)) ?
            riscv_excp_names[cause] : "(unknown)";
    }
}
/*
 * Set both the current and the maximum-allowed MISA MXL and extension
 * bits.  Writing the *_max/_mask copies too makes the values the WARL
 * upper bound for later runtime writes to misa.
 */
static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
{
    env->misa_mxl_max = env->misa_mxl = mxl;
    env->misa_ext_mask = env->misa_ext = ext;
}
/* Record the privileged-architecture spec version implemented by this hart */
static void set_priv_version(CPURISCVState *env, int priv_ver)
{
    env->priv_ver = priv_ver;
}
/* Record the vector-extension spec version implemented by this hart */
static void set_vext_version(CPURISCVState *env, int vext_ver)
{
    env->vext_ver = vext_ver;
}
/* Record the reset vector address; a no-op for user-mode emulation */
static void set_resetvec(CPURISCVState *env, target_ulong resetvec)
{
#ifndef CONFIG_USER_ONLY
    env->resetvec = resetvec;
#endif
}
/*
 * Instance init for the generic "any" CPU model: IMAFDC plus user mode,
 * at the XLEN this binary was built for.
 */
static void riscv_any_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#endif
    set_priv_version(env, PRIV_VERSION_1_11_0);
}
156 #if defined(TARGET_RISCV64)
/* rv64 base model: extension bits are left 0 and filled in at realize */
static void rv64_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realize function */
    set_misa(env, MXL_RV64, 0);
}
/* SiFive U-series rv64 hart: RV64GC with supervisor and user modes */
static void rv64_sifive_u_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    set_priv_version(env, PRIV_VERSION_1_10_0);
}
/* SiFive E-series rv64 hart: RV64IMAC, machine/user only, no MMU */
static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    set_priv_version(env, PRIV_VERSION_1_10_0);
    qdev_prop_set_bit(DEVICE(obj), "mmu", false);
}
/*
 * rv128 base model.  Refuses to start under MTTCG because 128-bit
 * aligned atomics are not implemented there.
 */
static void rv128_base_cpu_init(Object *obj)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realize function */
    set_misa(env, MXL_RV128, 0);
}
/* rv32 base model: extension bits are left 0 and filled in at realize */
static void rv32_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realize function */
    set_misa(env, MXL_RV32, 0);
}
/* SiFive U-series rv32 hart: RV32GC with supervisor and user modes */
static void rv32_sifive_u_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    set_priv_version(env, PRIV_VERSION_1_10_0);
}
/* SiFive E-series rv32 hart: RV32IMAC, machine/user only, no MMU */
static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    set_priv_version(env, PRIV_VERSION_1_10_0);
    qdev_prop_set_bit(DEVICE(obj), "mmu", false);
}
/* lowRISC Ibex: RV32IMC without MMU; enables experimental ePMP support */
static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    set_priv_version(env, PRIV_VERSION_1_10_0);
    qdev_prop_set_bit(DEVICE(obj), "mmu", false);
    qdev_prop_set_bit(DEVICE(obj), "x-epmp", true);
}
/* Generic RV32IMAFC hart without an MMU (e.g. SiFive E34-class cores) */
static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    set_priv_version(env, PRIV_VERSION_1_10_0);
    set_resetvec(env, DEFAULT_RSTVEC);
    qdev_prop_set_bit(DEVICE(obj), "mmu", false);
}
#if defined(CONFIG_KVM)
/*
 * "host" CPU model for KVM: MISA extension bits are left 0 here;
 * the host's actual configuration is used by KVM at runtime.
 */
static void riscv_host_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, 0);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, 0);
#endif
}
#endif
/*
 * Resolve a "-cpu" model string to its ObjectClass.  Anything after the
 * first ',' (per-CPU properties) is ignored here.  Returns NULL when the
 * name does not resolve to a concrete RISC-V CPU class.
 */
static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}
263 static void riscv_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
265 RISCVCPU
*cpu
= RISCV_CPU(cs
);
266 CPURISCVState
*env
= &cpu
->env
;
269 #if !defined(CONFIG_USER_ONLY)
270 if (riscv_has_ext(env
, RVH
)) {
271 qemu_fprintf(f
, " %s %d\n", "V = ", riscv_cpu_virt_enabled(env
));
274 qemu_fprintf(f
, " %s " TARGET_FMT_lx
"\n", "pc ", env
->pc
);
275 #ifndef CONFIG_USER_ONLY
277 static const int dump_csrs
[] = {
314 for (int i
= 0; i
< ARRAY_SIZE(dump_csrs
); ++i
) {
315 int csrno
= dump_csrs
[i
];
316 target_ulong val
= 0;
317 RISCVException res
= riscv_csrrw_debug(env
, csrno
, &val
, 0, 0);
320 * Rely on the smode, hmode, etc, predicates within csr.c
321 * to do the filtering of the registers that are present.
323 if (res
== RISCV_EXCP_NONE
) {
324 qemu_fprintf(f
, " %-8s " TARGET_FMT_lx
"\n",
325 csr_ops
[csrno
].name
, val
);
331 for (i
= 0; i
< 32; i
++) {
332 qemu_fprintf(f
, " %-8s " TARGET_FMT_lx
,
333 riscv_int_regnames
[i
], env
->gpr
[i
]);
335 qemu_fprintf(f
, "\n");
338 if (flags
& CPU_DUMP_FPU
) {
339 for (i
= 0; i
< 32; i
++) {
340 qemu_fprintf(f
, " %-8s %016" PRIx64
,
341 riscv_fpr_regnames
[i
], env
->fpr
[i
]);
343 qemu_fprintf(f
, "\n");
/*
 * CPUClass::set_pc hook.  In RV32 mode the PC is kept sign-extended
 * in the (wider) target_ulong, hence the int32_t cast.
 */
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}
/*
 * TCGCPUOps::synchronize_from_tb hook: restore env->pc from the TB about
 * to execute.  The current XLEN is read from the TB flags, and RV32 PCs
 * are sign-extended (matching riscv_cpu_set_pc).
 */
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

    if (xl == MXL_RV32) {
        env->pc = (int32_t)tb->pc;
    } else {
        env->pc = tb->pc;
    }
}
/*
 * CPUClass::has_work hook: should this vCPU leave the halted state?
 * For user mode there is no interrupt controller, so always report work.
 */
static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return (env->mip & env->mie) != 0;
#else
    return true;
#endif
}
/*
 * Restore CPU state after an exception mid-TB: data[0] holds the PC
 * recorded at translation time.  RV32 PCs are sign-extended, matching
 * the other PC writers in this file.
 */
void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    if (xl == MXL_RV32) {
        env->pc = (int32_t)data[0];
    } else {
        env->pc = data[0];
    }
}
401 static void riscv_cpu_reset(DeviceState
*dev
)
403 #ifndef CONFIG_USER_ONLY
407 CPUState
*cs
= CPU(dev
);
408 RISCVCPU
*cpu
= RISCV_CPU(cs
);
409 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(cpu
);
410 CPURISCVState
*env
= &cpu
->env
;
412 mcc
->parent_reset(dev
);
413 #ifndef CONFIG_USER_ONLY
414 env
->misa_mxl
= env
->misa_mxl_max
;
416 env
->mstatus
&= ~(MSTATUS_MIE
| MSTATUS_MPRV
);
417 if (env
->misa_mxl
> MXL_RV32
) {
419 * The reset status of SXL/UXL is undefined, but mstatus is WARL
420 * and we must ensure that the value after init is valid for read.
422 env
->mstatus
= set_field(env
->mstatus
, MSTATUS64_SXL
, env
->misa_mxl
);
423 env
->mstatus
= set_field(env
->mstatus
, MSTATUS64_UXL
, env
->misa_mxl
);
424 if (riscv_has_ext(env
, RVH
)) {
425 env
->vsstatus
= set_field(env
->vsstatus
,
426 MSTATUS64_SXL
, env
->misa_mxl
);
427 env
->vsstatus
= set_field(env
->vsstatus
,
428 MSTATUS64_UXL
, env
->misa_mxl
);
429 env
->mstatus_hs
= set_field(env
->mstatus_hs
,
430 MSTATUS64_SXL
, env
->misa_mxl
);
431 env
->mstatus_hs
= set_field(env
->mstatus_hs
,
432 MSTATUS64_UXL
, env
->misa_mxl
);
436 env
->miclaim
= MIP_SGEIP
;
437 env
->pc
= env
->resetvec
;
438 env
->two_stage_lookup
= false;
440 /* Initialized default priorities of local interrupts. */
441 for (i
= 0; i
< ARRAY_SIZE(env
->miprio
); i
++) {
442 iprio
= riscv_cpu_default_priority(i
);
443 env
->miprio
[i
] = (i
== IRQ_M_EXT
) ? 0 : iprio
;
444 env
->siprio
[i
] = (i
== IRQ_S_EXT
) ? 0 : iprio
;
448 while (!riscv_cpu_hviprio_index2irq(i
, &irq
, &rdzero
)) {
450 env
->hviprio
[irq
] = env
->miprio
[irq
];
454 /* mmte is supposed to have pm.current hardwired to 1 */
455 env
->mmte
|= (PM_EXT_INITIAL
| MMTE_M_PM_CURRENT
);
457 env
->xl
= riscv_cpu_mxl(env
);
458 riscv_cpu_update_mask(env
);
459 cs
->exception_index
= RISCV_EXCP_NONE
;
461 set_default_nan_mode(1, &env
->fp_status
);
463 #ifndef CONFIG_USER_ONLY
465 kvm_riscv_reset_vcpu(cpu
);
/* CPUClass::disas_set_info hook: pick the disassembler for the hart's XLEN */
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}
489 static void riscv_cpu_realize(DeviceState
*dev
, Error
**errp
)
491 CPUState
*cs
= CPU(dev
);
492 RISCVCPU
*cpu
= RISCV_CPU(dev
);
493 CPURISCVState
*env
= &cpu
->env
;
494 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(dev
);
495 CPUClass
*cc
= CPU_CLASS(mcc
);
496 int priv_version
= 0;
497 Error
*local_err
= NULL
;
499 cpu_exec_realizefn(cs
, &local_err
);
500 if (local_err
!= NULL
) {
501 error_propagate(errp
, local_err
);
505 if (cpu
->cfg
.priv_spec
) {
506 if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.11.0")) {
507 priv_version
= PRIV_VERSION_1_11_0
;
508 } else if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.10.0")) {
509 priv_version
= PRIV_VERSION_1_10_0
;
512 "Unsupported privilege spec version '%s'",
519 set_priv_version(env
, priv_version
);
520 } else if (!env
->priv_ver
) {
521 set_priv_version(env
, PRIV_VERSION_1_11_0
);
525 riscv_set_feature(env
, RISCV_FEATURE_MMU
);
529 riscv_set_feature(env
, RISCV_FEATURE_PMP
);
532 * Enhanced PMP should only be available
533 * on harts with PMP support
536 riscv_set_feature(env
, RISCV_FEATURE_EPMP
);
541 riscv_set_feature(env
, RISCV_FEATURE_AIA
);
544 set_resetvec(env
, cpu
->cfg
.resetvec
);
546 /* Validate that MISA_MXL is set properly. */
547 switch (env
->misa_mxl_max
) {
548 #ifdef TARGET_RISCV64
551 cc
->gdb_core_xml_file
= "riscv-64bit-cpu.xml";
555 cc
->gdb_core_xml_file
= "riscv-32bit-cpu.xml";
558 g_assert_not_reached();
560 assert(env
->misa_mxl_max
== env
->misa_mxl
);
562 /* If only MISA_EXT is unset for misa, then set it from properties */
563 if (env
->misa_ext
== 0) {
566 /* Do some ISA extension error checking */
567 if (cpu
->cfg
.ext_i
&& cpu
->cfg
.ext_e
) {
569 "I and E extensions are incompatible");
573 if (!cpu
->cfg
.ext_i
&& !cpu
->cfg
.ext_e
) {
575 "Either I or E extension must be set");
579 if (cpu
->cfg
.ext_g
&& !(cpu
->cfg
.ext_i
& cpu
->cfg
.ext_m
&
580 cpu
->cfg
.ext_a
& cpu
->cfg
.ext_f
&
582 warn_report("Setting G will also set IMAFD");
583 cpu
->cfg
.ext_i
= true;
584 cpu
->cfg
.ext_m
= true;
585 cpu
->cfg
.ext_a
= true;
586 cpu
->cfg
.ext_f
= true;
587 cpu
->cfg
.ext_d
= true;
590 /* Set the ISA extensions, checks should have happened above */
591 if (cpu
->cfg
.ext_i
) {
594 if (cpu
->cfg
.ext_e
) {
597 if (cpu
->cfg
.ext_m
) {
600 if (cpu
->cfg
.ext_a
) {
603 if (cpu
->cfg
.ext_f
) {
606 if (cpu
->cfg
.ext_d
) {
609 if (cpu
->cfg
.ext_c
) {
612 if (cpu
->cfg
.ext_s
) {
615 if (cpu
->cfg
.ext_u
) {
618 if (cpu
->cfg
.ext_h
) {
621 if (cpu
->cfg
.ext_v
) {
622 int vext_version
= VEXT_VERSION_1_00_0
;
624 if (!is_power_of_2(cpu
->cfg
.vlen
)) {
626 "Vector extension VLEN must be power of 2");
629 if (cpu
->cfg
.vlen
> RV_VLEN_MAX
|| cpu
->cfg
.vlen
< 128) {
631 "Vector extension implementation only supports VLEN "
632 "in the range [128, %d]", RV_VLEN_MAX
);
635 if (!is_power_of_2(cpu
->cfg
.elen
)) {
637 "Vector extension ELEN must be power of 2");
640 if (cpu
->cfg
.elen
> 64 || cpu
->cfg
.vlen
< 8) {
642 "Vector extension implementation only supports ELEN "
643 "in the range [8, 64]");
646 if (cpu
->cfg
.vext_spec
) {
647 if (!g_strcmp0(cpu
->cfg
.vext_spec
, "v1.0")) {
648 vext_version
= VEXT_VERSION_1_00_0
;
651 "Unsupported vector spec version '%s'",
656 qemu_log("vector version is not specified, "
657 "use the default value v1.0\n");
659 set_vext_version(env
, vext_version
);
661 if ((cpu
->cfg
.ext_zve32f
|| cpu
->cfg
.ext_zve64f
) && !cpu
->cfg
.ext_f
) {
662 error_setg(errp
, "Zve32f/Zve64f extension depends upon RVF.");
665 if (cpu
->cfg
.ext_j
) {
669 set_misa(env
, env
->misa_mxl
, ext
);
672 riscv_cpu_register_gdb_regs_for_features(cs
);
677 mcc
->parent_realize(dev
, errp
);
680 #ifndef CONFIG_USER_ONLY
681 static void riscv_cpu_set_irq(void *opaque
, int irq
, int level
)
683 RISCVCPU
*cpu
= RISCV_CPU(opaque
);
684 CPURISCVState
*env
= &cpu
->env
;
686 if (irq
< IRQ_LOCAL_MAX
) {
701 kvm_riscv_set_irq(cpu
, irq
, level
);
703 riscv_cpu_update_mip(cpu
, 1 << irq
, BOOL_TO_MASK(level
));
707 g_assert_not_reached();
709 } else if (irq
< (IRQ_LOCAL_MAX
+ IRQ_LOCAL_GUEST_MAX
)) {
710 /* Require H-extension for handling guest local interrupts */
711 if (!riscv_has_ext(env
, RVH
)) {
712 g_assert_not_reached();
715 /* Compute bit position in HGEIP CSR */
716 irq
= irq
- IRQ_LOCAL_MAX
+ 1;
717 if (env
->geilen
< irq
) {
718 g_assert_not_reached();
721 /* Update HGEIP CSR */
722 env
->hgeip
&= ~((target_ulong
)1 << irq
);
724 env
->hgeip
|= (target_ulong
)1 << irq
;
727 /* Update mip.SGEIP bit */
728 riscv_cpu_update_mip(cpu
, MIP_SGEIP
,
729 BOOL_TO_MASK(!!(env
->hgeie
& env
->hgeip
)));
731 g_assert_not_reached();
734 #endif /* CONFIG_USER_ONLY */
/*
 * QOM instance_init for every RISC-V CPU: wire up the CPUState pointers
 * and (system emulation only) the inbound IRQ lines, sized to cover both
 * local and guest-local interrupts.
 */
static void riscv_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    cpu_set_cpustate_pointers(cpu);

#ifndef CONFIG_USER_ONLY
    qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */
}
/*
 * User-settable "-cpu foo,prop=..." properties and their defaults.
 * Booleans toggle individual ISA extensions; "x-" prefixed entries are
 * experimental and may change incompatibly.
 */
static Property riscv_cpu_properties[] = {
    /* Defaults for standard extensions */
    DEFINE_PROP_BOOL("i", RISCVCPU, cfg.ext_i, true),
    DEFINE_PROP_BOOL("e", RISCVCPU, cfg.ext_e, false),
    DEFINE_PROP_BOOL("g", RISCVCPU, cfg.ext_g, true),
    DEFINE_PROP_BOOL("m", RISCVCPU, cfg.ext_m, true),
    DEFINE_PROP_BOOL("a", RISCVCPU, cfg.ext_a, true),
    DEFINE_PROP_BOOL("f", RISCVCPU, cfg.ext_f, true),
    DEFINE_PROP_BOOL("d", RISCVCPU, cfg.ext_d, true),
    DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true),
    DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true),
    DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
    DEFINE_PROP_BOOL("v", RISCVCPU, cfg.ext_v, false),
    DEFINE_PROP_BOOL("h", RISCVCPU, cfg.ext_h, true),
    DEFINE_PROP_BOOL("Counters", RISCVCPU, cfg.ext_counters, true),
    DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
    DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
    DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
    DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
    DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
    DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),

    /* Spec-version strings are validated in riscv_cpu_realize() */
    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
    DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
    DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),

    DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
    DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
    DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
    DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),

    /* Vendor-specific custom extensions */
    DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),

    /* These are experimental so mark with 'x-' */
    DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
    DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
    DEFINE_PROP_BOOL("x-aia", RISCVCPU, cfg.aia, false),

    DEFINE_PROP_UINT64("resetvec", RISCVCPU, cfg.resetvec, DEFAULT_RSTVEC),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * CPUClass::gdb_arch_name hook: architecture string reported to GDB.
 * NOTE(review): reconstructed case labels — RV64 and RV128 both appear
 * to report "riscv:rv64" (GDB has no rv128 arch); confirm upstream.
 */
static gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return g_strdup("riscv:rv32");
    case MXL_RV64:
    case MXL_RV128:
        return g_strdup("riscv:rv64");
    default:
        g_assert_not_reached();
    }
}
/*
 * CPUClass::gdb_get_dynamic_xml hook: serve the per-CPU generated CSR
 * and vector register descriptions; NULL for unknown XML names.
 */
static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}
828 #ifndef CONFIG_USER_ONLY
829 #include "hw/core/sysemu-cpu-ops.h"
/* System-emulation-only hooks: phys-addr debug, ELF core notes, migration */
static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
839 #include "hw/core/tcg-cpu-ops.h"
/* TCG execution hooks; the MMU/interrupt ones exist only in system mode */
static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
#endif /* !CONFIG_USER_ONLY */
};
/*
 * QOM class_init: hook realize/reset into the device class, and fill in
 * the CPUClass callbacks (GDB stub, state dump, disassembly, TCG ops).
 */
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);
    device_class_set_parent_reset(dc, riscv_cpu_reset, &mcc->parent_reset);
    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    /* 32 GPRs + pc; extra registers are added dynamically at realize */
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    device_class_set_props(dc, riscv_cpu_properties);
}
884 char *riscv_isa_string(RISCVCPU
*cpu
)
887 const size_t maxlen
= sizeof("rv128") + sizeof(riscv_exts
) + 1;
888 char *isa_str
= g_new(char, maxlen
);
889 char *p
= isa_str
+ snprintf(isa_str
, maxlen
, "rv%d", TARGET_LONG_BITS
);
890 for (i
= 0; i
< sizeof(riscv_exts
); i
++) {
891 if (cpu
->env
.misa_ext
& RV(riscv_exts
[i
])) {
892 *p
++ = qemu_tolower(riscv_exts
[i
]);
899 static gint
riscv_cpu_list_compare(gconstpointer a
, gconstpointer b
)
901 ObjectClass
*class_a
= (ObjectClass
*)a
;
902 ObjectClass
*class_b
= (ObjectClass
*)b
;
903 const char *name_a
, *name_b
;
905 name_a
= object_class_get_name(class_a
);
906 name_b
= object_class_get_name(class_b
);
907 return strcmp(name_a
, name_b
);
/*
 * GFunc for riscv_cpu_list(): print one CPU model name, with the
 * "-riscv-cpu" type suffix stripped.
 */
static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);

    qemu_printf("%.*s\n", len, typename);
}
/* Print all registered concrete RISC-V CPU models, sorted by name */
void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}
/* TypeInfo boilerplate for a concrete CPU model under TYPE_RISCV_CPU */
#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

/*
 * Abstract base type plus every model available for this build's XLEN.
 * NOTE(review): base-entry fields (.parent/.abstract) reconstructed from
 * the surrounding structure — confirm against upstream.
 */
static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    DEFINE_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
    /* Shakti C reuses the SiFive U configuration */
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)