4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
25 #include "cpu_vendorid.h"
27 #include "internals.h"
28 #include "time_helper.h"
29 #include "exec/exec-all.h"
30 #include "qapi/error.h"
31 #include "qapi/visitor.h"
32 #include "qemu/error-report.h"
33 #include "hw/qdev-properties.h"
34 #include "migration/vmstate.h"
35 #include "fpu/softfloat-helpers.h"
36 #include "sysemu/kvm.h"
37 #include "kvm_riscv.h"
40 /* RISC-V CPU definitions */
42 #define RISCV_CPU_MARCHID ((QEMU_VERSION_MAJOR << 16) | \
43 (QEMU_VERSION_MINOR << 8) | \
45 #define RISCV_CPU_MIMPID RISCV_CPU_MARCHID
/*
 * Canonical ordering of the single-letter ISA extensions when building a
 * RISC-V ISA string (e.g. "rv64imafdc").
 */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
53 int ext_enable_offset
;
56 #define ISA_EXT_DATA_ENTRY(_name, _m_letter, _min_ver, _prop) \
57 {#_name, _m_letter, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}
60 * Here are the ordering rules of extension naming defined by RISC-V
62 * 1. All extensions should be separated from other multi-letter extensions
64 * 2. The first letter following the 'Z' conventionally indicates the most
65 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
66 * If multiple 'Z' extensions are named, they should be ordered first
67 * by category, then alphabetically within a category.
68 * 3. Standard supervisor-level extensions (starts with 'S') should be
69 * listed after standard unprivileged extensions. If multiple
70 * supervisor-level extensions are listed, they should be ordered
72 * 4. Non-standard extensions (starts with 'X') must be listed after all
73 * standard extensions. They must be separated from other multi-letter
74 * extensions by an underscore.
/*
 * Table of ISA extensions known to this CPU model: each entry pairs an
 * extension name with the minimum privileged-spec version required to
 * enable it and the offset of its enable flag inside RISCVCPUConfig
 * (via ISA_EXT_DATA_ENTRY).  The realize path walks this table and
 * force-disables any extension whose min_version exceeds the hart's
 * configured privileged-spec version.
 * The second argument flags multi-letter extensions (false for the
 * single-letter H and V entries).
 */
static const struct isa_ext_data isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(h, false, PRIV_VERSION_1_12_0, ext_h),
    ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_10_0, ext_v),
    ISA_EXT_DATA_ENTRY(zicbom, true, PRIV_VERSION_1_12_0, ext_icbom),
    ISA_EXT_DATA_ENTRY(zicboz, true, PRIV_VERSION_1_12_0, ext_icboz),
    ISA_EXT_DATA_ENTRY(zicond, true, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr),
    ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei),
    ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zawrs, true, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfh, true, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, true, PRIV_VERSION_1_12_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, true, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, true, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zba, true, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, true, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, true, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, true, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, true, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, true, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, true, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, true, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, true, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, true, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, true, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, true, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, true, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, true, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, true, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, true, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, true, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zve32f, true, PRIV_VERSION_1_12_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, true, PRIV_VERSION_1_12_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, true, PRIV_VERSION_1_12_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfh, true, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, true, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zhinx, true, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, true, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, true, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(ssaia, true, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, true, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, true, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, true, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, true, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, true, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, true, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, true, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, true, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, true, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, true, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, true, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, true, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, true, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, true, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, true, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, true, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, true, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, true, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
};
136 static bool isa_ext_is_enabled(RISCVCPU
*cpu
,
137 const struct isa_ext_data
*edata
)
139 bool *ext_enabled
= (void *)&cpu
->cfg
+ edata
->ext_enable_offset
;
144 static void isa_ext_update_enabled(RISCVCPU
*cpu
,
145 const struct isa_ext_data
*edata
, bool en
)
147 bool *ext_enabled
= (void *)&cpu
->cfg
+ edata
->ext_enable_offset
;
/* Integer register names: architectural name paired with its ABI alias. */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};
/*
 * "High-half" integer register names ('h' suffix on each architectural and
 * ABI name).  NOTE(review): presumably the upper halves used by the 128-bit
 * build's register dump — confirm against the RV128 dump code.
 */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};
/* Floating-point register names: architectural name paired with its ABI alias. */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};
178 static const char * const riscv_excp_names
[] = {
181 "illegal_instruction",
199 "guest_exec_page_fault",
200 "guest_load_page_fault",
202 "guest_store_page_fault",
205 static const char * const riscv_intr_names
[] = {
224 static void register_cpu_props(Object
*obj
);
226 const char *riscv_cpu_get_trap_name(target_ulong cause
, bool async
)
229 return (cause
< ARRAY_SIZE(riscv_intr_names
)) ?
230 riscv_intr_names
[cause
] : "(unknown)";
232 return (cause
< ARRAY_SIZE(riscv_excp_names
)) ?
233 riscv_excp_names
[cause
] : "(unknown)";
237 static void set_misa(CPURISCVState
*env
, RISCVMXL mxl
, uint32_t ext
)
239 env
->misa_mxl_max
= env
->misa_mxl
= mxl
;
240 env
->misa_ext_mask
= env
->misa_ext
= ext
;
243 static void set_priv_version(CPURISCVState
*env
, int priv_ver
)
245 env
->priv_ver
= priv_ver
;
248 static void set_vext_version(CPURISCVState
*env
, int vext_ver
)
250 env
->vext_ver
= vext_ver
;
253 #ifndef CONFIG_USER_ONLY
254 static uint8_t satp_mode_from_str(const char *satp_mode_str
)
256 if (!strncmp(satp_mode_str
, "mbare", 5)) {
257 return VM_1_10_MBARE
;
260 if (!strncmp(satp_mode_str
, "sv32", 4)) {
264 if (!strncmp(satp_mode_str
, "sv39", 4)) {
268 if (!strncmp(satp_mode_str
, "sv48", 4)) {
272 if (!strncmp(satp_mode_str
, "sv57", 4)) {
276 if (!strncmp(satp_mode_str
, "sv64", 4)) {
280 g_assert_not_reached();
/*
 * Return the highest satp mode present in @map, i.e. the index of its
 * most-significant set bit.  The caller guarantees @map has at least one
 * bit set (the original used __builtin_clz, which is undefined for 0).
 */
uint8_t satp_mode_max_from_map(uint32_t map)
{
    uint8_t idx = 0;

    while (map >>= 1) {
        idx++;
    }
    return idx;
}
289 const char *satp_mode_str(uint8_t satp_mode
, bool is_32_bit
)
313 g_assert_not_reached();
316 static void set_satp_mode_max_supported(RISCVCPU
*cpu
,
319 bool rv32
= riscv_cpu_mxl(&cpu
->env
) == MXL_RV32
;
320 const bool *valid_vm
= rv32
? valid_vm_1_10_32
: valid_vm_1_10_64
;
322 for (int i
= 0; i
<= satp_mode
; ++i
) {
324 cpu
->cfg
.satp_mode
.supported
|= (1 << i
);
329 /* Set the satp mode to the max supported */
330 static void set_satp_mode_default_map(RISCVCPU
*cpu
)
332 cpu
->cfg
.satp_mode
.map
= cpu
->cfg
.satp_mode
.supported
;
336 static void riscv_any_cpu_init(Object
*obj
)
338 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
339 #if defined(TARGET_RISCV32)
340 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVU
);
341 #elif defined(TARGET_RISCV64)
342 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVU
);
345 #ifndef CONFIG_USER_ONLY
346 set_satp_mode_max_supported(RISCV_CPU(obj
),
347 riscv_cpu_mxl(&RISCV_CPU(obj
)->env
) == MXL_RV32
?
348 VM_1_10_SV32
: VM_1_10_SV57
);
351 set_priv_version(env
, PRIV_VERSION_1_12_0
);
352 register_cpu_props(obj
);
355 #if defined(TARGET_RISCV64)
356 static void rv64_base_cpu_init(Object
*obj
)
358 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
359 /* We set this in the realise function */
360 set_misa(env
, MXL_RV64
, 0);
361 register_cpu_props(obj
);
362 /* Set latest version of privileged specification */
363 set_priv_version(env
, PRIV_VERSION_1_12_0
);
364 #ifndef CONFIG_USER_ONLY
365 set_satp_mode_max_supported(RISCV_CPU(obj
), VM_1_10_SV57
);
369 static void rv64_sifive_u_cpu_init(Object
*obj
)
371 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
372 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVS
| RVU
);
373 register_cpu_props(obj
);
374 set_priv_version(env
, PRIV_VERSION_1_10_0
);
375 #ifndef CONFIG_USER_ONLY
376 set_satp_mode_max_supported(RISCV_CPU(obj
), VM_1_10_SV39
);
380 static void rv64_sifive_e_cpu_init(Object
*obj
)
382 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
383 RISCVCPU
*cpu
= RISCV_CPU(obj
);
385 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVC
| RVU
);
386 register_cpu_props(obj
);
387 set_priv_version(env
, PRIV_VERSION_1_10_0
);
388 cpu
->cfg
.mmu
= false;
389 #ifndef CONFIG_USER_ONLY
390 set_satp_mode_max_supported(cpu
, VM_1_10_MBARE
);
394 static void rv64_thead_c906_cpu_init(Object
*obj
)
396 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
397 RISCVCPU
*cpu
= RISCV_CPU(obj
);
399 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVS
| RVU
);
400 set_priv_version(env
, PRIV_VERSION_1_11_0
);
402 cpu
->cfg
.ext_g
= true;
403 cpu
->cfg
.ext_c
= true;
404 cpu
->cfg
.ext_u
= true;
405 cpu
->cfg
.ext_s
= true;
406 cpu
->cfg
.ext_icsr
= true;
407 cpu
->cfg
.ext_zfh
= true;
409 cpu
->cfg
.ext_xtheadba
= true;
410 cpu
->cfg
.ext_xtheadbb
= true;
411 cpu
->cfg
.ext_xtheadbs
= true;
412 cpu
->cfg
.ext_xtheadcmo
= true;
413 cpu
->cfg
.ext_xtheadcondmov
= true;
414 cpu
->cfg
.ext_xtheadfmemidx
= true;
415 cpu
->cfg
.ext_xtheadmac
= true;
416 cpu
->cfg
.ext_xtheadmemidx
= true;
417 cpu
->cfg
.ext_xtheadmempair
= true;
418 cpu
->cfg
.ext_xtheadsync
= true;
420 cpu
->cfg
.mvendorid
= THEAD_VENDOR_ID
;
421 #ifndef CONFIG_USER_ONLY
422 set_satp_mode_max_supported(cpu
, VM_1_10_SV39
);
426 static void rv128_base_cpu_init(Object
*obj
)
428 if (qemu_tcg_mttcg_enabled()) {
429 /* Missing 128-bit aligned atomics */
430 error_report("128-bit RISC-V currently does not work with Multi "
431 "Threaded TCG. Please use: -accel tcg,thread=single");
434 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
435 /* We set this in the realise function */
436 set_misa(env
, MXL_RV128
, 0);
437 register_cpu_props(obj
);
438 /* Set latest version of privileged specification */
439 set_priv_version(env
, PRIV_VERSION_1_12_0
);
440 #ifndef CONFIG_USER_ONLY
441 set_satp_mode_max_supported(RISCV_CPU(obj
), VM_1_10_SV57
);
445 static void rv32_base_cpu_init(Object
*obj
)
447 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
448 /* We set this in the realise function */
449 set_misa(env
, MXL_RV32
, 0);
450 register_cpu_props(obj
);
451 /* Set latest version of privileged specification */
452 set_priv_version(env
, PRIV_VERSION_1_12_0
);
453 #ifndef CONFIG_USER_ONLY
454 set_satp_mode_max_supported(RISCV_CPU(obj
), VM_1_10_SV32
);
458 static void rv32_sifive_u_cpu_init(Object
*obj
)
460 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
461 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVS
| RVU
);
462 register_cpu_props(obj
);
463 set_priv_version(env
, PRIV_VERSION_1_10_0
);
464 #ifndef CONFIG_USER_ONLY
465 set_satp_mode_max_supported(RISCV_CPU(obj
), VM_1_10_SV32
);
469 static void rv32_sifive_e_cpu_init(Object
*obj
)
471 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
472 RISCVCPU
*cpu
= RISCV_CPU(obj
);
474 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVC
| RVU
);
475 register_cpu_props(obj
);
476 set_priv_version(env
, PRIV_VERSION_1_10_0
);
477 cpu
->cfg
.mmu
= false;
478 #ifndef CONFIG_USER_ONLY
479 set_satp_mode_max_supported(cpu
, VM_1_10_MBARE
);
483 static void rv32_ibex_cpu_init(Object
*obj
)
485 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
486 RISCVCPU
*cpu
= RISCV_CPU(obj
);
488 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVC
| RVU
);
489 register_cpu_props(obj
);
490 set_priv_version(env
, PRIV_VERSION_1_11_0
);
491 cpu
->cfg
.mmu
= false;
492 #ifndef CONFIG_USER_ONLY
493 set_satp_mode_max_supported(cpu
, VM_1_10_MBARE
);
495 cpu
->cfg
.epmp
= true;
498 static void rv32_imafcu_nommu_cpu_init(Object
*obj
)
500 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
501 RISCVCPU
*cpu
= RISCV_CPU(obj
);
503 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVC
| RVU
);
504 register_cpu_props(obj
);
505 set_priv_version(env
, PRIV_VERSION_1_10_0
);
506 cpu
->cfg
.mmu
= false;
507 #ifndef CONFIG_USER_ONLY
508 set_satp_mode_max_supported(cpu
, VM_1_10_MBARE
);
513 #if defined(CONFIG_KVM)
514 static void riscv_host_cpu_init(Object
*obj
)
516 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
517 #if defined(TARGET_RISCV32)
518 set_misa(env
, MXL_RV32
, 0);
519 #elif defined(TARGET_RISCV64)
520 set_misa(env
, MXL_RV64
, 0);
522 register_cpu_props(obj
);
526 static ObjectClass
*riscv_cpu_class_by_name(const char *cpu_model
)
532 cpuname
= g_strsplit(cpu_model
, ",", 1);
533 typename
= g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname
[0]);
534 oc
= object_class_by_name(typename
);
537 if (!oc
|| !object_class_dynamic_cast(oc
, TYPE_RISCV_CPU
) ||
538 object_class_is_abstract(oc
)) {
544 static void riscv_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
546 RISCVCPU
*cpu
= RISCV_CPU(cs
);
547 CPURISCVState
*env
= &cpu
->env
;
550 #if !defined(CONFIG_USER_ONLY)
551 if (riscv_has_ext(env
, RVH
)) {
552 qemu_fprintf(f
, " %s %d\n", "V = ", riscv_cpu_virt_enabled(env
));
555 qemu_fprintf(f
, " %s " TARGET_FMT_lx
"\n", "pc ", env
->pc
);
556 #ifndef CONFIG_USER_ONLY
558 static const int dump_csrs
[] = {
563 * CSR_SSTATUS is intentionally omitted here as its value
564 * can be figured out by looking at CSR_MSTATUS
599 for (int i
= 0; i
< ARRAY_SIZE(dump_csrs
); ++i
) {
600 int csrno
= dump_csrs
[i
];
601 target_ulong val
= 0;
602 RISCVException res
= riscv_csrrw_debug(env
, csrno
, &val
, 0, 0);
605 * Rely on the smode, hmode, etc, predicates within csr.c
606 * to do the filtering of the registers that are present.
608 if (res
== RISCV_EXCP_NONE
) {
609 qemu_fprintf(f
, " %-8s " TARGET_FMT_lx
"\n",
610 csr_ops
[csrno
].name
, val
);
616 for (i
= 0; i
< 32; i
++) {
617 qemu_fprintf(f
, " %-8s " TARGET_FMT_lx
,
618 riscv_int_regnames
[i
], env
->gpr
[i
]);
620 qemu_fprintf(f
, "\n");
623 if (flags
& CPU_DUMP_FPU
) {
624 for (i
= 0; i
< 32; i
++) {
625 qemu_fprintf(f
, " %-8s %016" PRIx64
,
626 riscv_fpr_regnames
[i
], env
->fpr
[i
]);
628 qemu_fprintf(f
, "\n");
634 static void riscv_cpu_set_pc(CPUState
*cs
, vaddr value
)
636 RISCVCPU
*cpu
= RISCV_CPU(cs
);
637 CPURISCVState
*env
= &cpu
->env
;
639 if (env
->xl
== MXL_RV32
) {
640 env
->pc
= (int32_t)value
;
646 static vaddr
riscv_cpu_get_pc(CPUState
*cs
)
648 RISCVCPU
*cpu
= RISCV_CPU(cs
);
649 CPURISCVState
*env
= &cpu
->env
;
651 /* Match cpu_get_tb_cpu_state. */
652 if (env
->xl
== MXL_RV32
) {
653 return env
->pc
& UINT32_MAX
;
658 static void riscv_cpu_synchronize_from_tb(CPUState
*cs
,
659 const TranslationBlock
*tb
)
661 RISCVCPU
*cpu
= RISCV_CPU(cs
);
662 CPURISCVState
*env
= &cpu
->env
;
663 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
665 tcg_debug_assert(!(cs
->tcg_cflags
& CF_PCREL
));
667 if (xl
== MXL_RV32
) {
668 env
->pc
= (int32_t) tb
->pc
;
674 static bool riscv_cpu_has_work(CPUState
*cs
)
676 #ifndef CONFIG_USER_ONLY
677 RISCVCPU
*cpu
= RISCV_CPU(cs
);
678 CPURISCVState
*env
= &cpu
->env
;
680 * Definition of the WFI instruction requires it to ignore the privilege
681 * mode and delegation registers, but respect individual enables
683 return riscv_cpu_all_pending(env
) != 0;
689 static void riscv_restore_state_to_opc(CPUState
*cs
,
690 const TranslationBlock
*tb
,
691 const uint64_t *data
)
693 RISCVCPU
*cpu
= RISCV_CPU(cs
);
694 CPURISCVState
*env
= &cpu
->env
;
695 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
697 if (xl
== MXL_RV32
) {
698 env
->pc
= (int32_t)data
[0];
705 static void riscv_cpu_reset_hold(Object
*obj
)
707 #ifndef CONFIG_USER_ONLY
711 CPUState
*cs
= CPU(obj
);
712 RISCVCPU
*cpu
= RISCV_CPU(cs
);
713 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(cpu
);
714 CPURISCVState
*env
= &cpu
->env
;
716 if (mcc
->parent_phases
.hold
) {
717 mcc
->parent_phases
.hold(obj
);
719 #ifndef CONFIG_USER_ONLY
720 env
->misa_mxl
= env
->misa_mxl_max
;
722 env
->mstatus
&= ~(MSTATUS_MIE
| MSTATUS_MPRV
);
723 if (env
->misa_mxl
> MXL_RV32
) {
725 * The reset status of SXL/UXL is undefined, but mstatus is WARL
726 * and we must ensure that the value after init is valid for read.
728 env
->mstatus
= set_field(env
->mstatus
, MSTATUS64_SXL
, env
->misa_mxl
);
729 env
->mstatus
= set_field(env
->mstatus
, MSTATUS64_UXL
, env
->misa_mxl
);
730 if (riscv_has_ext(env
, RVH
)) {
731 env
->vsstatus
= set_field(env
->vsstatus
,
732 MSTATUS64_SXL
, env
->misa_mxl
);
733 env
->vsstatus
= set_field(env
->vsstatus
,
734 MSTATUS64_UXL
, env
->misa_mxl
);
735 env
->mstatus_hs
= set_field(env
->mstatus_hs
,
736 MSTATUS64_SXL
, env
->misa_mxl
);
737 env
->mstatus_hs
= set_field(env
->mstatus_hs
,
738 MSTATUS64_UXL
, env
->misa_mxl
);
742 env
->miclaim
= MIP_SGEIP
;
743 env
->pc
= env
->resetvec
;
745 env
->two_stage_lookup
= false;
747 env
->menvcfg
= (cpu
->cfg
.ext_svpbmt
? MENVCFG_PBMTE
: 0) |
748 (cpu
->cfg
.ext_svadu
? MENVCFG_HADE
: 0);
749 env
->henvcfg
= (cpu
->cfg
.ext_svpbmt
? HENVCFG_PBMTE
: 0) |
750 (cpu
->cfg
.ext_svadu
? HENVCFG_HADE
: 0);
752 /* Initialized default priorities of local interrupts. */
753 for (i
= 0; i
< ARRAY_SIZE(env
->miprio
); i
++) {
754 iprio
= riscv_cpu_default_priority(i
);
755 env
->miprio
[i
] = (i
== IRQ_M_EXT
) ? 0 : iprio
;
756 env
->siprio
[i
] = (i
== IRQ_S_EXT
) ? 0 : iprio
;
760 while (!riscv_cpu_hviprio_index2irq(i
, &irq
, &rdzero
)) {
762 env
->hviprio
[irq
] = env
->miprio
[irq
];
766 /* mmte is supposed to have pm.current hardwired to 1 */
767 env
->mmte
|= (PM_EXT_INITIAL
| MMTE_M_PM_CURRENT
);
769 env
->xl
= riscv_cpu_mxl(env
);
770 riscv_cpu_update_mask(env
);
771 cs
->exception_index
= RISCV_EXCP_NONE
;
773 set_default_nan_mode(1, &env
->fp_status
);
775 #ifndef CONFIG_USER_ONLY
776 if (cpu
->cfg
.debug
) {
777 riscv_trigger_init(env
);
781 kvm_riscv_reset_vcpu(cpu
);
786 static void riscv_cpu_disas_set_info(CPUState
*s
, disassemble_info
*info
)
788 RISCVCPU
*cpu
= RISCV_CPU(s
);
790 switch (riscv_cpu_mxl(&cpu
->env
)) {
792 info
->print_insn
= print_insn_riscv32
;
795 info
->print_insn
= print_insn_riscv64
;
798 info
->print_insn
= print_insn_riscv128
;
801 g_assert_not_reached();
806 * Check consistency between chosen extensions while setting
807 * cpu->cfg accordingly, doing a set_misa() in the end.
809 static void riscv_cpu_validate_set_extensions(RISCVCPU
*cpu
, Error
**errp
)
811 CPURISCVState
*env
= &cpu
->env
;
814 /* Do some ISA extension error checking */
815 if (cpu
->cfg
.ext_g
&& !(cpu
->cfg
.ext_i
&& cpu
->cfg
.ext_m
&&
816 cpu
->cfg
.ext_a
&& cpu
->cfg
.ext_f
&&
818 cpu
->cfg
.ext_icsr
&& cpu
->cfg
.ext_ifencei
)) {
819 warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
820 cpu
->cfg
.ext_i
= true;
821 cpu
->cfg
.ext_m
= true;
822 cpu
->cfg
.ext_a
= true;
823 cpu
->cfg
.ext_f
= true;
824 cpu
->cfg
.ext_d
= true;
825 cpu
->cfg
.ext_icsr
= true;
826 cpu
->cfg
.ext_ifencei
= true;
829 if (cpu
->cfg
.ext_i
&& cpu
->cfg
.ext_e
) {
831 "I and E extensions are incompatible");
835 if (!cpu
->cfg
.ext_i
&& !cpu
->cfg
.ext_e
) {
837 "Either I or E extension must be set");
841 if (cpu
->cfg
.ext_s
&& !cpu
->cfg
.ext_u
) {
843 "Setting S extension without U extension is illegal");
847 if (cpu
->cfg
.ext_h
&& !cpu
->cfg
.ext_i
) {
849 "H depends on an I base integer ISA with 32 x registers");
853 if (cpu
->cfg
.ext_h
&& !cpu
->cfg
.ext_s
) {
854 error_setg(errp
, "H extension implicitly requires S-mode");
858 if (cpu
->cfg
.ext_f
&& !cpu
->cfg
.ext_icsr
) {
859 error_setg(errp
, "F extension requires Zicsr");
863 if ((cpu
->cfg
.ext_zawrs
) && !cpu
->cfg
.ext_a
) {
864 error_setg(errp
, "Zawrs extension requires A extension");
868 if (cpu
->cfg
.ext_zfh
) {
869 cpu
->cfg
.ext_zfhmin
= true;
872 if (cpu
->cfg
.ext_zfhmin
&& !cpu
->cfg
.ext_f
) {
873 error_setg(errp
, "Zfh/Zfhmin extensions require F extension");
877 if (cpu
->cfg
.ext_d
&& !cpu
->cfg
.ext_f
) {
878 error_setg(errp
, "D extension requires F extension");
882 /* The V vector extension depends on the Zve64d extension */
883 if (cpu
->cfg
.ext_v
) {
884 cpu
->cfg
.ext_zve64d
= true;
887 /* The Zve64d extension depends on the Zve64f extension */
888 if (cpu
->cfg
.ext_zve64d
) {
889 cpu
->cfg
.ext_zve64f
= true;
892 /* The Zve64f extension depends on the Zve32f extension */
893 if (cpu
->cfg
.ext_zve64f
) {
894 cpu
->cfg
.ext_zve32f
= true;
897 if (cpu
->cfg
.ext_zve64d
&& !cpu
->cfg
.ext_d
) {
898 error_setg(errp
, "Zve64d/V extensions require D extension");
902 if (cpu
->cfg
.ext_zve32f
&& !cpu
->cfg
.ext_f
) {
903 error_setg(errp
, "Zve32f/Zve64f extensions require F extension");
907 if (cpu
->cfg
.ext_zvfh
) {
908 cpu
->cfg
.ext_zvfhmin
= true;
911 if (cpu
->cfg
.ext_zvfhmin
&& !cpu
->cfg
.ext_zve32f
) {
912 error_setg(errp
, "Zvfh/Zvfhmin extensions require Zve32f extension");
916 if (cpu
->cfg
.ext_zvfh
&& !cpu
->cfg
.ext_zfhmin
) {
917 error_setg(errp
, "Zvfh extensions requires Zfhmin extension");
921 /* Set the ISA extensions, checks should have happened above */
922 if (cpu
->cfg
.ext_zhinx
) {
923 cpu
->cfg
.ext_zhinxmin
= true;
926 if (cpu
->cfg
.ext_zdinx
|| cpu
->cfg
.ext_zhinxmin
) {
927 cpu
->cfg
.ext_zfinx
= true;
930 if (cpu
->cfg
.ext_zfinx
) {
931 if (!cpu
->cfg
.ext_icsr
) {
932 error_setg(errp
, "Zfinx extension requires Zicsr");
935 if (cpu
->cfg
.ext_f
) {
937 "Zfinx cannot be supported together with F extension");
942 if (cpu
->cfg
.ext_zk
) {
943 cpu
->cfg
.ext_zkn
= true;
944 cpu
->cfg
.ext_zkr
= true;
945 cpu
->cfg
.ext_zkt
= true;
948 if (cpu
->cfg
.ext_zkn
) {
949 cpu
->cfg
.ext_zbkb
= true;
950 cpu
->cfg
.ext_zbkc
= true;
951 cpu
->cfg
.ext_zbkx
= true;
952 cpu
->cfg
.ext_zkne
= true;
953 cpu
->cfg
.ext_zknd
= true;
954 cpu
->cfg
.ext_zknh
= true;
957 if (cpu
->cfg
.ext_zks
) {
958 cpu
->cfg
.ext_zbkb
= true;
959 cpu
->cfg
.ext_zbkc
= true;
960 cpu
->cfg
.ext_zbkx
= true;
961 cpu
->cfg
.ext_zksed
= true;
962 cpu
->cfg
.ext_zksh
= true;
965 if (cpu
->cfg
.ext_i
) {
968 if (cpu
->cfg
.ext_e
) {
971 if (cpu
->cfg
.ext_m
) {
974 if (cpu
->cfg
.ext_a
) {
977 if (cpu
->cfg
.ext_f
) {
980 if (cpu
->cfg
.ext_d
) {
983 if (cpu
->cfg
.ext_c
) {
986 if (cpu
->cfg
.ext_s
) {
989 if (cpu
->cfg
.ext_u
) {
992 if (cpu
->cfg
.ext_h
) {
995 if (cpu
->cfg
.ext_v
) {
996 int vext_version
= VEXT_VERSION_1_00_0
;
998 if (!is_power_of_2(cpu
->cfg
.vlen
)) {
1000 "Vector extension VLEN must be power of 2");
1003 if (cpu
->cfg
.vlen
> RV_VLEN_MAX
|| cpu
->cfg
.vlen
< 128) {
1005 "Vector extension implementation only supports VLEN "
1006 "in the range [128, %d]", RV_VLEN_MAX
);
1009 if (!is_power_of_2(cpu
->cfg
.elen
)) {
1011 "Vector extension ELEN must be power of 2");
1014 if (cpu
->cfg
.elen
> 64 || cpu
->cfg
.elen
< 8) {
1016 "Vector extension implementation only supports ELEN "
1017 "in the range [8, 64]");
1020 if (cpu
->cfg
.vext_spec
) {
1021 if (!g_strcmp0(cpu
->cfg
.vext_spec
, "v1.0")) {
1022 vext_version
= VEXT_VERSION_1_00_0
;
1025 "Unsupported vector spec version '%s'",
1026 cpu
->cfg
.vext_spec
);
1030 qemu_log("vector version is not specified, "
1031 "use the default value v1.0\n");
1033 set_vext_version(env
, vext_version
);
1035 if (cpu
->cfg
.ext_j
) {
1039 set_misa(env
, env
->misa_mxl
, ext
);
1042 #ifndef CONFIG_USER_ONLY
1043 static void riscv_cpu_satp_mode_finalize(RISCVCPU
*cpu
, Error
**errp
)
1045 bool rv32
= riscv_cpu_mxl(&cpu
->env
) == MXL_RV32
;
1046 uint8_t satp_mode_map_max
;
1047 uint8_t satp_mode_supported_max
=
1048 satp_mode_max_from_map(cpu
->cfg
.satp_mode
.supported
);
1050 if (cpu
->cfg
.satp_mode
.map
== 0) {
1051 if (cpu
->cfg
.satp_mode
.init
== 0) {
1052 /* If unset by the user, we fallback to the default satp mode. */
1053 set_satp_mode_default_map(cpu
);
1056 * Find the lowest level that was disabled and then enable the
1057 * first valid level below which can be found in
1058 * valid_vm_1_10_32/64.
1060 for (int i
= 1; i
< 16; ++i
) {
1061 if ((cpu
->cfg
.satp_mode
.init
& (1 << i
)) &&
1062 (cpu
->cfg
.satp_mode
.supported
& (1 << i
))) {
1063 for (int j
= i
- 1; j
>= 0; --j
) {
1064 if (cpu
->cfg
.satp_mode
.supported
& (1 << j
)) {
1065 cpu
->cfg
.satp_mode
.map
|= (1 << j
);
1075 satp_mode_map_max
= satp_mode_max_from_map(cpu
->cfg
.satp_mode
.map
);
1077 /* Make sure the user asked for a supported configuration (HW and qemu) */
1078 if (satp_mode_map_max
> satp_mode_supported_max
) {
1079 error_setg(errp
, "satp_mode %s is higher than hw max capability %s",
1080 satp_mode_str(satp_mode_map_max
, rv32
),
1081 satp_mode_str(satp_mode_supported_max
, rv32
));
1086 * Make sure the user did not ask for an invalid configuration as per
1087 * the specification.
1090 for (int i
= satp_mode_map_max
- 1; i
>= 0; --i
) {
1091 if (!(cpu
->cfg
.satp_mode
.map
& (1 << i
)) &&
1092 (cpu
->cfg
.satp_mode
.init
& (1 << i
)) &&
1093 (cpu
->cfg
.satp_mode
.supported
& (1 << i
))) {
1094 error_setg(errp
, "cannot disable %s satp mode if %s "
1095 "is enabled", satp_mode_str(i
, false),
1096 satp_mode_str(satp_mode_map_max
, false));
1102 /* Finally expand the map so that all valid modes are set */
1103 for (int i
= satp_mode_map_max
- 1; i
>= 0; --i
) {
1104 if (cpu
->cfg
.satp_mode
.supported
& (1 << i
)) {
1105 cpu
->cfg
.satp_mode
.map
|= (1 << i
);
1111 static void riscv_cpu_finalize_features(RISCVCPU
*cpu
, Error
**errp
)
1113 #ifndef CONFIG_USER_ONLY
1114 Error
*local_err
= NULL
;
1116 riscv_cpu_satp_mode_finalize(cpu
, &local_err
);
1117 if (local_err
!= NULL
) {
1118 error_propagate(errp
, local_err
);
1124 static void riscv_cpu_realize(DeviceState
*dev
, Error
**errp
)
1126 CPUState
*cs
= CPU(dev
);
1127 RISCVCPU
*cpu
= RISCV_CPU(dev
);
1128 CPURISCVState
*env
= &cpu
->env
;
1129 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(dev
);
1130 CPUClass
*cc
= CPU_CLASS(mcc
);
1131 int i
, priv_version
= -1;
1132 Error
*local_err
= NULL
;
1134 cpu_exec_realizefn(cs
, &local_err
);
1135 if (local_err
!= NULL
) {
1136 error_propagate(errp
, local_err
);
1140 if (cpu
->cfg
.priv_spec
) {
1141 if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.12.0")) {
1142 priv_version
= PRIV_VERSION_1_12_0
;
1143 } else if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.11.0")) {
1144 priv_version
= PRIV_VERSION_1_11_0
;
1145 } else if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.10.0")) {
1146 priv_version
= PRIV_VERSION_1_10_0
;
1149 "Unsupported privilege spec version '%s'",
1150 cpu
->cfg
.priv_spec
);
1155 if (priv_version
>= PRIV_VERSION_1_10_0
) {
1156 set_priv_version(env
, priv_version
);
1159 /* Force disable extensions if priv spec version does not match */
1160 for (i
= 0; i
< ARRAY_SIZE(isa_edata_arr
); i
++) {
1161 if (isa_ext_is_enabled(cpu
, &isa_edata_arr
[i
]) &&
1162 (env
->priv_ver
< isa_edata_arr
[i
].min_version
)) {
1163 isa_ext_update_enabled(cpu
, &isa_edata_arr
[i
], false);
1164 #ifndef CONFIG_USER_ONLY
1165 warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
1166 " because privilege spec version does not match",
1167 isa_edata_arr
[i
].name
, env
->mhartid
);
1169 warn_report("disabling %s extension because "
1170 "privilege spec version does not match",
1171 isa_edata_arr
[i
].name
);
1176 if (cpu
->cfg
.epmp
&& !cpu
->cfg
.pmp
) {
1178 * Enhanced PMP should only be available
1179 * on harts with PMP support
1181 error_setg(errp
, "Invalid configuration: EPMP requires PMP support");
1186 #ifndef CONFIG_USER_ONLY
1187 if (cpu
->cfg
.ext_sstc
) {
1188 riscv_timer_init(cpu
);
1190 #endif /* CONFIG_USER_ONLY */
1192 /* Validate that MISA_MXL is set properly. */
1193 switch (env
->misa_mxl_max
) {
1194 #ifdef TARGET_RISCV64
1197 cc
->gdb_core_xml_file
= "riscv-64bit-cpu.xml";
1201 cc
->gdb_core_xml_file
= "riscv-32bit-cpu.xml";
1204 g_assert_not_reached();
1206 assert(env
->misa_mxl_max
== env
->misa_mxl
);
1208 riscv_cpu_validate_set_extensions(cpu
, &local_err
);
1209 if (local_err
!= NULL
) {
1210 error_propagate(errp
, local_err
);
1214 #ifndef CONFIG_USER_ONLY
1215 if (cpu
->cfg
.pmu_num
) {
1216 if (!riscv_pmu_init(cpu
, cpu
->cfg
.pmu_num
) && cpu
->cfg
.ext_sscofpmf
) {
1217 cpu
->pmu_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
1218 riscv_pmu_timer_cb
, cpu
);
1223 riscv_cpu_finalize_features(cpu
, &local_err
);
1224 if (local_err
!= NULL
) {
1225 error_propagate(errp
, local_err
);
1229 riscv_cpu_register_gdb_regs_for_features(cs
);
1234 mcc
->parent_realize(dev
, errp
);
1237 #ifndef CONFIG_USER_ONLY
1238 static void cpu_riscv_get_satp(Object
*obj
, Visitor
*v
, const char *name
,
1239 void *opaque
, Error
**errp
)
1241 RISCVSATPMap
*satp_map
= opaque
;
1242 uint8_t satp
= satp_mode_from_str(name
);
1245 value
= satp_map
->map
& (1 << satp
);
1247 visit_type_bool(v
, name
, &value
, errp
);
1250 static void cpu_riscv_set_satp(Object
*obj
, Visitor
*v
, const char *name
,
1251 void *opaque
, Error
**errp
)
1253 RISCVSATPMap
*satp_map
= opaque
;
1254 uint8_t satp
= satp_mode_from_str(name
);
1257 if (!visit_type_bool(v
, name
, &value
, errp
)) {
1261 satp_map
->map
= deposit32(satp_map
->map
, satp
, 1, value
);
1262 satp_map
->init
|= 1 << satp
;
1265 static void riscv_add_satp_mode_properties(Object
*obj
)
1267 RISCVCPU
*cpu
= RISCV_CPU(obj
);
1269 if (cpu
->env
.misa_mxl
== MXL_RV32
) {
1270 object_property_add(obj
, "sv32", "bool", cpu_riscv_get_satp
,
1271 cpu_riscv_set_satp
, NULL
, &cpu
->cfg
.satp_mode
);
1273 object_property_add(obj
, "sv39", "bool", cpu_riscv_get_satp
,
1274 cpu_riscv_set_satp
, NULL
, &cpu
->cfg
.satp_mode
);
1275 object_property_add(obj
, "sv48", "bool", cpu_riscv_get_satp
,
1276 cpu_riscv_set_satp
, NULL
, &cpu
->cfg
.satp_mode
);
1277 object_property_add(obj
, "sv57", "bool", cpu_riscv_get_satp
,
1278 cpu_riscv_set_satp
, NULL
, &cpu
->cfg
.satp_mode
);
1279 object_property_add(obj
, "sv64", "bool", cpu_riscv_get_satp
,
1280 cpu_riscv_set_satp
, NULL
, &cpu
->cfg
.satp_mode
);
1284 static void riscv_cpu_set_irq(void *opaque
, int irq
, int level
)
1286 RISCVCPU
*cpu
= RISCV_CPU(opaque
);
1287 CPURISCVState
*env
= &cpu
->env
;
1289 if (irq
< IRQ_LOCAL_MAX
) {
1302 if (kvm_enabled()) {
1303 kvm_riscv_set_irq(cpu
, irq
, level
);
1305 riscv_cpu_update_mip(cpu
, 1 << irq
, BOOL_TO_MASK(level
));
1309 if (kvm_enabled()) {
1310 kvm_riscv_set_irq(cpu
, irq
, level
);
1312 env
->external_seip
= level
;
1313 riscv_cpu_update_mip(cpu
, 1 << irq
,
1314 BOOL_TO_MASK(level
| env
->software_seip
));
1318 g_assert_not_reached();
1320 } else if (irq
< (IRQ_LOCAL_MAX
+ IRQ_LOCAL_GUEST_MAX
)) {
1321 /* Require H-extension for handling guest local interrupts */
1322 if (!riscv_has_ext(env
, RVH
)) {
1323 g_assert_not_reached();
1326 /* Compute bit position in HGEIP CSR */
1327 irq
= irq
- IRQ_LOCAL_MAX
+ 1;
1328 if (env
->geilen
< irq
) {
1329 g_assert_not_reached();
1332 /* Update HGEIP CSR */
1333 env
->hgeip
&= ~((target_ulong
)1 << irq
);
1335 env
->hgeip
|= (target_ulong
)1 << irq
;
1338 /* Update mip.SGEIP bit */
1339 riscv_cpu_update_mip(cpu
, MIP_SGEIP
,
1340 BOOL_TO_MASK(!!(env
->hgeie
& env
->hgeip
)));
1342 g_assert_not_reached();
1345 #endif /* CONFIG_USER_ONLY */
1347 static void riscv_cpu_init(Object
*obj
)
1349 RISCVCPU
*cpu
= RISCV_CPU(obj
);
1351 cpu
->cfg
.ext_ifencei
= true;
1352 cpu
->cfg
.ext_icsr
= true;
1353 cpu
->cfg
.mmu
= true;
1354 cpu
->cfg
.pmp
= true;
1356 cpu_set_cpustate_pointers(cpu
);
1358 #ifndef CONFIG_USER_ONLY
1359 qdev_init_gpio_in(DEVICE(cpu
), riscv_cpu_set_irq
,
1360 IRQ_LOCAL_MAX
+ IRQ_LOCAL_GUEST_MAX
);
1361 #endif /* CONFIG_USER_ONLY */
/*
 * User-configurable extension properties registered on generic CPU
 * models by register_cpu_props(). Each entry's default reflects what a
 * bare "-cpu rv64"/"rv32" provides out of the box.
 */
static Property riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    DEFINE_PROP_BOOL("i", RISCVCPU, cfg.ext_i, true),
    DEFINE_PROP_BOOL("e", RISCVCPU, cfg.ext_e, false),
    DEFINE_PROP_BOOL("g", RISCVCPU, cfg.ext_g, false),
    DEFINE_PROP_BOOL("m", RISCVCPU, cfg.ext_m, true),
    DEFINE_PROP_BOOL("a", RISCVCPU, cfg.ext_a, true),
    DEFINE_PROP_BOOL("f", RISCVCPU, cfg.ext_f, true),
    DEFINE_PROP_BOOL("d", RISCVCPU, cfg.ext_d, true),
    DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true),
    DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true),
    DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
    DEFINE_PROP_BOOL("v", RISCVCPU, cfg.ext_v, false),
    DEFINE_PROP_BOOL("h", RISCVCPU, cfg.ext_h, true),
    DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
    DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
    DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
    DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
    DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
    DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
    DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
    DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
    DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
    DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
    DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
    DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),

    /* ISA spec version selectors and vector geometry. */
    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),

    DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
    DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
    DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),

    /* Bit-manipulation and scalar-crypto extensions. */
    DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
    DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
    DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
    DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
    DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
    DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
    DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
    DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
    DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
    DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
    DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
    DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
    DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
    DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
    DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
    DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
    DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),

    /* Float/double-in-integer-register variants. */
    DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
    DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
    DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
    DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),

    /* Cache-block management/zeroing and their block sizes. */
    DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true),
    DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
    DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true),
    DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),

    DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),

    /* Vendor-specific custom extensions */
    DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
    DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
    DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
    DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
    DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
    DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
    DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false),
    DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
    DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
    DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
    DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
    DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),

    /* These are experimental so mark with 'x-' */
    DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),
    DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
    DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
    DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
    DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),

    DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
    DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),

    DEFINE_PROP_END_OF_LIST(),
};
1463 * Register CPU props based on env.misa_ext. If a non-zero
1464 * value was set, register only the required cpu->cfg.ext_*
1465 * properties and leave. env.misa_ext = 0 means that we want
1466 * all the default properties to be registered.
1468 static void register_cpu_props(Object
*obj
)
1470 RISCVCPU
*cpu
= RISCV_CPU(obj
);
1471 uint32_t misa_ext
= cpu
->env
.misa_ext
;
1473 DeviceState
*dev
= DEVICE(obj
);
1476 * If misa_ext is not zero, set cfg properties now to
1477 * allow them to be read during riscv_cpu_realize()
1480 if (cpu
->env
.misa_ext
!= 0) {
1481 cpu
->cfg
.ext_i
= misa_ext
& RVI
;
1482 cpu
->cfg
.ext_e
= misa_ext
& RVE
;
1483 cpu
->cfg
.ext_m
= misa_ext
& RVM
;
1484 cpu
->cfg
.ext_a
= misa_ext
& RVA
;
1485 cpu
->cfg
.ext_f
= misa_ext
& RVF
;
1486 cpu
->cfg
.ext_d
= misa_ext
& RVD
;
1487 cpu
->cfg
.ext_v
= misa_ext
& RVV
;
1488 cpu
->cfg
.ext_c
= misa_ext
& RVC
;
1489 cpu
->cfg
.ext_s
= misa_ext
& RVS
;
1490 cpu
->cfg
.ext_u
= misa_ext
& RVU
;
1491 cpu
->cfg
.ext_h
= misa_ext
& RVH
;
1492 cpu
->cfg
.ext_j
= misa_ext
& RVJ
;
1495 * We don't want to set the default riscv_cpu_extensions
1501 for (prop
= riscv_cpu_extensions
; prop
&& prop
->name
; prop
++) {
1502 qdev_property_add_static(dev
, prop
);
1505 #ifndef CONFIG_USER_ONLY
1506 riscv_add_satp_mode_properties(obj
);
/*
 * Properties common to every RISC-V CPU type (as opposed to the
 * per-extension knobs in riscv_cpu_extensions[]).
 */
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    /* Machine identity CSRs; marchid/mimpid default to the QEMU version. */
    DEFINE_PROP_UINT32("mvendorid", RISCVCPU, cfg.mvendorid, 0),
    DEFINE_PROP_UINT64("marchid", RISCVCPU, cfg.marchid, RISCV_CPU_MARCHID),
    DEFINE_PROP_UINT64("mimpid", RISCVCPU, cfg.mimpid, RISCV_CPU_MIMPID),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    /* Vector tail/mask agnostic behavior: fill with all-1s when set. */
    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};
1534 static gchar
*riscv_gdb_arch_name(CPUState
*cs
)
1536 RISCVCPU
*cpu
= RISCV_CPU(cs
);
1537 CPURISCVState
*env
= &cpu
->env
;
1539 switch (riscv_cpu_mxl(env
)) {
1541 return g_strdup("riscv:rv32");
1544 return g_strdup("riscv:rv64");
1546 g_assert_not_reached();
1550 static const char *riscv_gdb_get_dynamic_xml(CPUState
*cs
, const char *xmlname
)
1552 RISCVCPU
*cpu
= RISCV_CPU(cs
);
1554 if (strcmp(xmlname
, "riscv-csr.xml") == 0) {
1555 return cpu
->dyn_csr_xml
;
1556 } else if (strcmp(xmlname
, "riscv-vector.xml") == 0) {
1557 return cpu
->dyn_vreg_xml
;
1563 #ifndef CONFIG_USER_ONLY
1564 static int64_t riscv_get_arch_id(CPUState
*cs
)
1566 RISCVCPU
*cpu
= RISCV_CPU(cs
);
1568 return cpu
->env
.mhartid
;
1571 #include "hw/core/sysemu-cpu-ops.h"
/* System-emulation hooks (physical address debug, ELF notes, vmstate). */
static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
1581 #include "hw/core/tcg-cpu-ops.h"
/* TCG frontend hooks: translation plus (sysemu only) fault/debug entry points. */
static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};
/*
 * Class init for the abstract RISC-V CPU type: chain realize/reset to
 * the parent class and install the CPUClass callbacks shared by every
 * concrete CPU model.
 */
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    /* Chain our realize in front of the parent's. */
    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    /* Only the "hold" reset phase is overridden. */
    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    /* 32 GPRs + pc are the core gdb register set. */
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    device_class_set_props(dc, riscv_cpu_properties);
}
1634 static void riscv_isa_string_ext(RISCVCPU
*cpu
, char **isa_str
, int max_str_len
)
1636 char *old
= *isa_str
;
1637 char *new = *isa_str
;
1640 for (i
= 0; i
< ARRAY_SIZE(isa_edata_arr
); i
++) {
1641 if (isa_edata_arr
[i
].multi_letter
&&
1642 isa_ext_is_enabled(cpu
, &isa_edata_arr
[i
])) {
1643 new = g_strconcat(old
, "_", isa_edata_arr
[i
].name
, NULL
);
1652 char *riscv_isa_string(RISCVCPU
*cpu
)
1655 const size_t maxlen
= sizeof("rv128") + sizeof(riscv_single_letter_exts
);
1656 char *isa_str
= g_new(char, maxlen
);
1657 char *p
= isa_str
+ snprintf(isa_str
, maxlen
, "rv%d", TARGET_LONG_BITS
);
1658 for (i
= 0; i
< sizeof(riscv_single_letter_exts
) - 1; i
++) {
1659 if (cpu
->env
.misa_ext
& RV(riscv_single_letter_exts
[i
])) {
1660 *p
++ = qemu_tolower(riscv_single_letter_exts
[i
]);
1664 if (!cpu
->cfg
.short_isa_string
) {
1665 riscv_isa_string_ext(cpu
, &isa_str
, maxlen
);
1670 static gint
riscv_cpu_list_compare(gconstpointer a
, gconstpointer b
)
1672 ObjectClass
*class_a
= (ObjectClass
*)a
;
1673 ObjectClass
*class_b
= (ObjectClass
*)b
;
1674 const char *name_a
, *name_b
;
1676 name_a
= object_class_get_name(class_a
);
1677 name_b
= object_class_get_name(class_b
);
1678 return strcmp(name_a
, name_b
);
1681 static void riscv_cpu_list_entry(gpointer data
, gpointer user_data
)
1683 const char *typename
= object_class_get_name(OBJECT_CLASS(data
));
1684 int len
= strlen(typename
) - strlen(RISCV_CPU_TYPE_SUFFIX
);
1686 qemu_printf("%.*s\n", len
, typename
);
1689 void riscv_cpu_list(void)
1693 list
= object_class_get_list(TYPE_RISCV_CPU
, false);
1694 list
= g_slist_sort(list
, riscv_cpu_list_compare
);
1695 g_slist_foreach(list
, riscv_cpu_list_entry
, NULL
);
/* Shorthand for a concrete CPU model deriving from TYPE_RISCV_CPU. */
#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

/*
 * Type table: the abstract base CPU first, then the concrete models
 * available for the configured target.
 * NOTE(review): the base entry's .parent/.abstract fields were missing
 * from the extracted text and restored per upstream — confirm.
 */
static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    DEFINE_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
    /* Shakti C reuses the SiFive U init function. */
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)