4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
25 #include "cpu_vendorid.h"
27 #include "internals.h"
28 #include "time_helper.h"
29 #include "exec/exec-all.h"
30 #include "qapi/error.h"
31 #include "qemu/error-report.h"
32 #include "hw/qdev-properties.h"
33 #include "migration/vmstate.h"
34 #include "fpu/softfloat-helpers.h"
35 #include "sysemu/kvm.h"
36 #include "kvm_riscv.h"
/* RISC-V CPU definitions */

/*
 * Encode the QEMU version into marchid/mimpid so guests can identify
 * the emulator build: major in bits [31:16], minor in [15:8], micro in [7:0].
 * The QEMU_VERSION_MICRO term was lost in a bad merge; restore it so the
 * low byte is not silently zero.
 */
#define RISCV_CPU_MARCHID   ((QEMU_VERSION_MAJOR << 16) | \
                             (QEMU_VERSION_MINOR << 8)  | \
                             (QEMU_VERSION_MICRO))
#define RISCV_CPU_MIMPID    RISCV_CPU_MARCHID
45 static const char riscv_single_letter_exts
[] = "IEMAFDQCPVH";
51 int ext_enable_offset
;
/* Build one isa_ext_data row; _prop is the RISCVCPUConfig member name. */
#define ISA_EXT_DATA_ENTRY(_name, _m_letter, _min_ver, _prop) \
    {#_name, _m_letter, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}
58 * Here are the ordering rules of extension naming defined by RISC-V
60 * 1. All extensions should be separated from other multi-letter extensions
62 * 2. The first letter following the 'Z' conventionally indicates the most
63 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
64 * If multiple 'Z' extensions are named, they should be ordered first
65 * by category, then alphabetically within a category.
66 * 3. Standard supervisor-level extensions (starts with 'S') should be
67 * listed after standard unprivileged extensions. If multiple
68 * supervisor-level extensions are listed, they should be ordered
70 * 4. Non-standard extensions (starts with 'X') must be listed after all
71 * standard extensions. They must be separated from other multi-letter
72 * extensions by an underscore.
74 static const struct isa_ext_data isa_edata_arr
[] = {
75 ISA_EXT_DATA_ENTRY(h
, false, PRIV_VERSION_1_12_0
, ext_h
),
76 ISA_EXT_DATA_ENTRY(v
, false, PRIV_VERSION_1_10_0
, ext_v
),
77 ISA_EXT_DATA_ENTRY(zicond
, true, PRIV_VERSION_1_12_0
, ext_zicond
),
78 ISA_EXT_DATA_ENTRY(zicsr
, true, PRIV_VERSION_1_10_0
, ext_icsr
),
79 ISA_EXT_DATA_ENTRY(zifencei
, true, PRIV_VERSION_1_10_0
, ext_ifencei
),
80 ISA_EXT_DATA_ENTRY(zihintpause
, true, PRIV_VERSION_1_10_0
, ext_zihintpause
),
81 ISA_EXT_DATA_ENTRY(zawrs
, true, PRIV_VERSION_1_12_0
, ext_zawrs
),
82 ISA_EXT_DATA_ENTRY(zfh
, true, PRIV_VERSION_1_11_0
, ext_zfh
),
83 ISA_EXT_DATA_ENTRY(zfhmin
, true, PRIV_VERSION_1_12_0
, ext_zfhmin
),
84 ISA_EXT_DATA_ENTRY(zfinx
, true, PRIV_VERSION_1_12_0
, ext_zfinx
),
85 ISA_EXT_DATA_ENTRY(zdinx
, true, PRIV_VERSION_1_12_0
, ext_zdinx
),
86 ISA_EXT_DATA_ENTRY(zba
, true, PRIV_VERSION_1_12_0
, ext_zba
),
87 ISA_EXT_DATA_ENTRY(zbb
, true, PRIV_VERSION_1_12_0
, ext_zbb
),
88 ISA_EXT_DATA_ENTRY(zbc
, true, PRIV_VERSION_1_12_0
, ext_zbc
),
89 ISA_EXT_DATA_ENTRY(zbkb
, true, PRIV_VERSION_1_12_0
, ext_zbkb
),
90 ISA_EXT_DATA_ENTRY(zbkc
, true, PRIV_VERSION_1_12_0
, ext_zbkc
),
91 ISA_EXT_DATA_ENTRY(zbkx
, true, PRIV_VERSION_1_12_0
, ext_zbkx
),
92 ISA_EXT_DATA_ENTRY(zbs
, true, PRIV_VERSION_1_12_0
, ext_zbs
),
93 ISA_EXT_DATA_ENTRY(zk
, true, PRIV_VERSION_1_12_0
, ext_zk
),
94 ISA_EXT_DATA_ENTRY(zkn
, true, PRIV_VERSION_1_12_0
, ext_zkn
),
95 ISA_EXT_DATA_ENTRY(zknd
, true, PRIV_VERSION_1_12_0
, ext_zknd
),
96 ISA_EXT_DATA_ENTRY(zkne
, true, PRIV_VERSION_1_12_0
, ext_zkne
),
97 ISA_EXT_DATA_ENTRY(zknh
, true, PRIV_VERSION_1_12_0
, ext_zknh
),
98 ISA_EXT_DATA_ENTRY(zkr
, true, PRIV_VERSION_1_12_0
, ext_zkr
),
99 ISA_EXT_DATA_ENTRY(zks
, true, PRIV_VERSION_1_12_0
, ext_zks
),
100 ISA_EXT_DATA_ENTRY(zksed
, true, PRIV_VERSION_1_12_0
, ext_zksed
),
101 ISA_EXT_DATA_ENTRY(zksh
, true, PRIV_VERSION_1_12_0
, ext_zksh
),
102 ISA_EXT_DATA_ENTRY(zkt
, true, PRIV_VERSION_1_12_0
, ext_zkt
),
103 ISA_EXT_DATA_ENTRY(zve32f
, true, PRIV_VERSION_1_12_0
, ext_zve32f
),
104 ISA_EXT_DATA_ENTRY(zve64f
, true, PRIV_VERSION_1_12_0
, ext_zve64f
),
105 ISA_EXT_DATA_ENTRY(zve64d
, true, PRIV_VERSION_1_12_0
, ext_zve64d
),
106 ISA_EXT_DATA_ENTRY(zvfh
, true, PRIV_VERSION_1_12_0
, ext_zvfh
),
107 ISA_EXT_DATA_ENTRY(zvfhmin
, true, PRIV_VERSION_1_12_0
, ext_zvfhmin
),
108 ISA_EXT_DATA_ENTRY(zhinx
, true, PRIV_VERSION_1_12_0
, ext_zhinx
),
109 ISA_EXT_DATA_ENTRY(zhinxmin
, true, PRIV_VERSION_1_12_0
, ext_zhinxmin
),
110 ISA_EXT_DATA_ENTRY(smaia
, true, PRIV_VERSION_1_12_0
, ext_smaia
),
111 ISA_EXT_DATA_ENTRY(ssaia
, true, PRIV_VERSION_1_12_0
, ext_ssaia
),
112 ISA_EXT_DATA_ENTRY(sscofpmf
, true, PRIV_VERSION_1_12_0
, ext_sscofpmf
),
113 ISA_EXT_DATA_ENTRY(sstc
, true, PRIV_VERSION_1_12_0
, ext_sstc
),
114 ISA_EXT_DATA_ENTRY(svadu
, true, PRIV_VERSION_1_12_0
, ext_svadu
),
115 ISA_EXT_DATA_ENTRY(svinval
, true, PRIV_VERSION_1_12_0
, ext_svinval
),
116 ISA_EXT_DATA_ENTRY(svnapot
, true, PRIV_VERSION_1_12_0
, ext_svnapot
),
117 ISA_EXT_DATA_ENTRY(svpbmt
, true, PRIV_VERSION_1_12_0
, ext_svpbmt
),
118 ISA_EXT_DATA_ENTRY(xtheadba
, true, PRIV_VERSION_1_11_0
, ext_xtheadba
),
119 ISA_EXT_DATA_ENTRY(xtheadbb
, true, PRIV_VERSION_1_11_0
, ext_xtheadbb
),
120 ISA_EXT_DATA_ENTRY(xtheadbs
, true, PRIV_VERSION_1_11_0
, ext_xtheadbs
),
121 ISA_EXT_DATA_ENTRY(xtheadcmo
, true, PRIV_VERSION_1_11_0
, ext_xtheadcmo
),
122 ISA_EXT_DATA_ENTRY(xtheadcondmov
, true, PRIV_VERSION_1_11_0
, ext_xtheadcondmov
),
123 ISA_EXT_DATA_ENTRY(xtheadfmemidx
, true, PRIV_VERSION_1_11_0
, ext_xtheadfmemidx
),
124 ISA_EXT_DATA_ENTRY(xtheadfmv
, true, PRIV_VERSION_1_11_0
, ext_xtheadfmv
),
125 ISA_EXT_DATA_ENTRY(xtheadmac
, true, PRIV_VERSION_1_11_0
, ext_xtheadmac
),
126 ISA_EXT_DATA_ENTRY(xtheadmemidx
, true, PRIV_VERSION_1_11_0
, ext_xtheadmemidx
),
127 ISA_EXT_DATA_ENTRY(xtheadmempair
, true, PRIV_VERSION_1_11_0
, ext_xtheadmempair
),
128 ISA_EXT_DATA_ENTRY(xtheadsync
, true, PRIV_VERSION_1_11_0
, ext_xtheadsync
),
129 ISA_EXT_DATA_ENTRY(xventanacondops
, true, PRIV_VERSION_1_12_0
, ext_XVentanaCondOps
),
132 static bool isa_ext_is_enabled(RISCVCPU
*cpu
,
133 const struct isa_ext_data
*edata
)
135 bool *ext_enabled
= (void *)&cpu
->cfg
+ edata
->ext_enable_offset
;
140 static void isa_ext_update_enabled(RISCVCPU
*cpu
,
141 const struct isa_ext_data
*edata
, bool en
)
143 bool *ext_enabled
= (void *)&cpu
->cfg
+ edata
->ext_enable_offset
;
/* Integer register names: "xN/ABI-name", indexed by register number. */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
    "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
    "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
    "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
};
/* High halves of the integer registers (RV128), indexed by register number. */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
    "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
    "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
    "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
    "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h",  "x31h/t6h"
};
/* Floating-point register names: "fN/ABI-name", indexed by register number. */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
    "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
    "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
    "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
    "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};
/*
 * Exception names indexed by mcause exception code (async == false).
 * NOTE(review): most rows of this table were lost in the paste; the list
 * below is reconstructed from the RISC-V privileged-spec cause numbering
 * and matches the visible entries (illegal_instruction at 2, guest
 * page-fault entries at 20/21/23) — confirm against upstream.
 */
static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault"
};
/*
 * Interrupt names indexed by mcause interrupt code (async == true).
 * NOTE(review): the body of this table was lost in the paste; rows are
 * reconstructed from the RISC-V privileged-spec interrupt numbering —
 * confirm against upstream.
 */
static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};
220 static void register_cpu_props(DeviceState
*dev
);
222 const char *riscv_cpu_get_trap_name(target_ulong cause
, bool async
)
225 return (cause
< ARRAY_SIZE(riscv_intr_names
)) ?
226 riscv_intr_names
[cause
] : "(unknown)";
228 return (cause
< ARRAY_SIZE(riscv_excp_names
)) ?
229 riscv_excp_names
[cause
] : "(unknown)";
233 static void set_misa(CPURISCVState
*env
, RISCVMXL mxl
, uint32_t ext
)
235 env
->misa_mxl_max
= env
->misa_mxl
= mxl
;
236 env
->misa_ext_mask
= env
->misa_ext
= ext
;
239 static void set_priv_version(CPURISCVState
*env
, int priv_ver
)
241 env
->priv_ver
= priv_ver
;
244 static void set_vext_version(CPURISCVState
*env
, int vext_ver
)
246 env
->vext_ver
= vext_ver
;
249 static void riscv_any_cpu_init(Object
*obj
)
251 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
252 #if defined(TARGET_RISCV32)
253 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVU
);
254 #elif defined(TARGET_RISCV64)
255 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVU
);
257 set_priv_version(env
, PRIV_VERSION_1_12_0
);
258 register_cpu_props(DEVICE(obj
));
261 #if defined(TARGET_RISCV64)
262 static void rv64_base_cpu_init(Object
*obj
)
264 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
265 /* We set this in the realise function */
266 set_misa(env
, MXL_RV64
, 0);
267 register_cpu_props(DEVICE(obj
));
268 /* Set latest version of privileged specification */
269 set_priv_version(env
, PRIV_VERSION_1_12_0
);
272 static void rv64_sifive_u_cpu_init(Object
*obj
)
274 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
275 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVS
| RVU
);
276 register_cpu_props(DEVICE(obj
));
277 set_priv_version(env
, PRIV_VERSION_1_10_0
);
280 static void rv64_sifive_e_cpu_init(Object
*obj
)
282 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
283 RISCVCPU
*cpu
= RISCV_CPU(obj
);
285 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVC
| RVU
);
286 register_cpu_props(DEVICE(obj
));
287 set_priv_version(env
, PRIV_VERSION_1_10_0
);
288 cpu
->cfg
.mmu
= false;
291 static void rv64_thead_c906_cpu_init(Object
*obj
)
293 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
294 RISCVCPU
*cpu
= RISCV_CPU(obj
);
296 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVS
| RVU
);
297 set_priv_version(env
, PRIV_VERSION_1_11_0
);
299 cpu
->cfg
.ext_g
= true;
300 cpu
->cfg
.ext_c
= true;
301 cpu
->cfg
.ext_u
= true;
302 cpu
->cfg
.ext_s
= true;
303 cpu
->cfg
.ext_icsr
= true;
304 cpu
->cfg
.ext_zfh
= true;
306 cpu
->cfg
.ext_xtheadba
= true;
307 cpu
->cfg
.ext_xtheadbb
= true;
308 cpu
->cfg
.ext_xtheadbs
= true;
309 cpu
->cfg
.ext_xtheadcmo
= true;
310 cpu
->cfg
.ext_xtheadcondmov
= true;
311 cpu
->cfg
.ext_xtheadfmemidx
= true;
312 cpu
->cfg
.ext_xtheadmac
= true;
313 cpu
->cfg
.ext_xtheadmemidx
= true;
314 cpu
->cfg
.ext_xtheadmempair
= true;
315 cpu
->cfg
.ext_xtheadsync
= true;
317 cpu
->cfg
.mvendorid
= THEAD_VENDOR_ID
;
320 static void rv128_base_cpu_init(Object
*obj
)
322 if (qemu_tcg_mttcg_enabled()) {
323 /* Missing 128-bit aligned atomics */
324 error_report("128-bit RISC-V currently does not work with Multi "
325 "Threaded TCG. Please use: -accel tcg,thread=single");
328 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
329 /* We set this in the realise function */
330 set_misa(env
, MXL_RV128
, 0);
331 register_cpu_props(DEVICE(obj
));
332 /* Set latest version of privileged specification */
333 set_priv_version(env
, PRIV_VERSION_1_12_0
);
336 static void rv32_base_cpu_init(Object
*obj
)
338 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
339 /* We set this in the realise function */
340 set_misa(env
, MXL_RV32
, 0);
341 register_cpu_props(DEVICE(obj
));
342 /* Set latest version of privileged specification */
343 set_priv_version(env
, PRIV_VERSION_1_12_0
);
346 static void rv32_sifive_u_cpu_init(Object
*obj
)
348 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
349 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVS
| RVU
);
350 register_cpu_props(DEVICE(obj
));
351 set_priv_version(env
, PRIV_VERSION_1_10_0
);
354 static void rv32_sifive_e_cpu_init(Object
*obj
)
356 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
357 RISCVCPU
*cpu
= RISCV_CPU(obj
);
359 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVC
| RVU
);
360 register_cpu_props(DEVICE(obj
));
361 set_priv_version(env
, PRIV_VERSION_1_10_0
);
362 cpu
->cfg
.mmu
= false;
365 static void rv32_ibex_cpu_init(Object
*obj
)
367 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
368 RISCVCPU
*cpu
= RISCV_CPU(obj
);
370 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVC
| RVU
);
371 register_cpu_props(DEVICE(obj
));
372 set_priv_version(env
, PRIV_VERSION_1_11_0
);
373 cpu
->cfg
.mmu
= false;
374 cpu
->cfg
.epmp
= true;
377 static void rv32_imafcu_nommu_cpu_init(Object
*obj
)
379 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
380 RISCVCPU
*cpu
= RISCV_CPU(obj
);
382 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVC
| RVU
);
383 register_cpu_props(DEVICE(obj
));
384 set_priv_version(env
, PRIV_VERSION_1_10_0
);
385 cpu
->cfg
.mmu
= false;
389 #if defined(CONFIG_KVM)
390 static void riscv_host_cpu_init(Object
*obj
)
392 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
393 #if defined(TARGET_RISCV32)
394 set_misa(env
, MXL_RV32
, 0);
395 #elif defined(TARGET_RISCV64)
396 set_misa(env
, MXL_RV64
, 0);
398 register_cpu_props(DEVICE(obj
));
402 static ObjectClass
*riscv_cpu_class_by_name(const char *cpu_model
)
408 cpuname
= g_strsplit(cpu_model
, ",", 1);
409 typename
= g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname
[0]);
410 oc
= object_class_by_name(typename
);
413 if (!oc
|| !object_class_dynamic_cast(oc
, TYPE_RISCV_CPU
) ||
414 object_class_is_abstract(oc
)) {
420 static void riscv_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
422 RISCVCPU
*cpu
= RISCV_CPU(cs
);
423 CPURISCVState
*env
= &cpu
->env
;
426 #if !defined(CONFIG_USER_ONLY)
427 if (riscv_has_ext(env
, RVH
)) {
428 qemu_fprintf(f
, " %s %d\n", "V = ", riscv_cpu_virt_enabled(env
));
431 qemu_fprintf(f
, " %s " TARGET_FMT_lx
"\n", "pc ", env
->pc
);
432 #ifndef CONFIG_USER_ONLY
434 static const int dump_csrs
[] = {
439 * CSR_SSTATUS is intentionally omitted here as its value
440 * can be figured out by looking at CSR_MSTATUS
475 for (int i
= 0; i
< ARRAY_SIZE(dump_csrs
); ++i
) {
476 int csrno
= dump_csrs
[i
];
477 target_ulong val
= 0;
478 RISCVException res
= riscv_csrrw_debug(env
, csrno
, &val
, 0, 0);
481 * Rely on the smode, hmode, etc, predicates within csr.c
482 * to do the filtering of the registers that are present.
484 if (res
== RISCV_EXCP_NONE
) {
485 qemu_fprintf(f
, " %-8s " TARGET_FMT_lx
"\n",
486 csr_ops
[csrno
].name
, val
);
492 for (i
= 0; i
< 32; i
++) {
493 qemu_fprintf(f
, " %-8s " TARGET_FMT_lx
,
494 riscv_int_regnames
[i
], env
->gpr
[i
]);
496 qemu_fprintf(f
, "\n");
499 if (flags
& CPU_DUMP_FPU
) {
500 for (i
= 0; i
< 32; i
++) {
501 qemu_fprintf(f
, " %-8s %016" PRIx64
,
502 riscv_fpr_regnames
[i
], env
->fpr
[i
]);
504 qemu_fprintf(f
, "\n");
510 static void riscv_cpu_set_pc(CPUState
*cs
, vaddr value
)
512 RISCVCPU
*cpu
= RISCV_CPU(cs
);
513 CPURISCVState
*env
= &cpu
->env
;
515 if (env
->xl
== MXL_RV32
) {
516 env
->pc
= (int32_t)value
;
522 static vaddr
riscv_cpu_get_pc(CPUState
*cs
)
524 RISCVCPU
*cpu
= RISCV_CPU(cs
);
525 CPURISCVState
*env
= &cpu
->env
;
527 /* Match cpu_get_tb_cpu_state. */
528 if (env
->xl
== MXL_RV32
) {
529 return env
->pc
& UINT32_MAX
;
534 static void riscv_cpu_synchronize_from_tb(CPUState
*cs
,
535 const TranslationBlock
*tb
)
537 RISCVCPU
*cpu
= RISCV_CPU(cs
);
538 CPURISCVState
*env
= &cpu
->env
;
539 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
541 if (xl
== MXL_RV32
) {
542 env
->pc
= (int32_t)tb_pc(tb
);
548 static bool riscv_cpu_has_work(CPUState
*cs
)
550 #ifndef CONFIG_USER_ONLY
551 RISCVCPU
*cpu
= RISCV_CPU(cs
);
552 CPURISCVState
*env
= &cpu
->env
;
554 * Definition of the WFI instruction requires it to ignore the privilege
555 * mode and delegation registers, but respect individual enables
557 return riscv_cpu_all_pending(env
) != 0;
563 static void riscv_restore_state_to_opc(CPUState
*cs
,
564 const TranslationBlock
*tb
,
565 const uint64_t *data
)
567 RISCVCPU
*cpu
= RISCV_CPU(cs
);
568 CPURISCVState
*env
= &cpu
->env
;
569 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
571 if (xl
== MXL_RV32
) {
572 env
->pc
= (int32_t)data
[0];
579 static void riscv_cpu_reset_hold(Object
*obj
)
581 #ifndef CONFIG_USER_ONLY
585 CPUState
*cs
= CPU(obj
);
586 RISCVCPU
*cpu
= RISCV_CPU(cs
);
587 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(cpu
);
588 CPURISCVState
*env
= &cpu
->env
;
590 if (mcc
->parent_phases
.hold
) {
591 mcc
->parent_phases
.hold(obj
);
593 #ifndef CONFIG_USER_ONLY
594 env
->misa_mxl
= env
->misa_mxl_max
;
596 env
->mstatus
&= ~(MSTATUS_MIE
| MSTATUS_MPRV
);
597 if (env
->misa_mxl
> MXL_RV32
) {
599 * The reset status of SXL/UXL is undefined, but mstatus is WARL
600 * and we must ensure that the value after init is valid for read.
602 env
->mstatus
= set_field(env
->mstatus
, MSTATUS64_SXL
, env
->misa_mxl
);
603 env
->mstatus
= set_field(env
->mstatus
, MSTATUS64_UXL
, env
->misa_mxl
);
604 if (riscv_has_ext(env
, RVH
)) {
605 env
->vsstatus
= set_field(env
->vsstatus
,
606 MSTATUS64_SXL
, env
->misa_mxl
);
607 env
->vsstatus
= set_field(env
->vsstatus
,
608 MSTATUS64_UXL
, env
->misa_mxl
);
609 env
->mstatus_hs
= set_field(env
->mstatus_hs
,
610 MSTATUS64_SXL
, env
->misa_mxl
);
611 env
->mstatus_hs
= set_field(env
->mstatus_hs
,
612 MSTATUS64_UXL
, env
->misa_mxl
);
616 env
->miclaim
= MIP_SGEIP
;
617 env
->pc
= env
->resetvec
;
619 env
->two_stage_lookup
= false;
621 env
->menvcfg
= (cpu
->cfg
.ext_svpbmt
? MENVCFG_PBMTE
: 0) |
622 (cpu
->cfg
.ext_svadu
? MENVCFG_HADE
: 0);
623 env
->henvcfg
= (cpu
->cfg
.ext_svpbmt
? HENVCFG_PBMTE
: 0) |
624 (cpu
->cfg
.ext_svadu
? HENVCFG_HADE
: 0);
626 /* Initialized default priorities of local interrupts. */
627 for (i
= 0; i
< ARRAY_SIZE(env
->miprio
); i
++) {
628 iprio
= riscv_cpu_default_priority(i
);
629 env
->miprio
[i
] = (i
== IRQ_M_EXT
) ? 0 : iprio
;
630 env
->siprio
[i
] = (i
== IRQ_S_EXT
) ? 0 : iprio
;
634 while (!riscv_cpu_hviprio_index2irq(i
, &irq
, &rdzero
)) {
636 env
->hviprio
[irq
] = env
->miprio
[irq
];
640 /* mmte is supposed to have pm.current hardwired to 1 */
641 env
->mmte
|= (PM_EXT_INITIAL
| MMTE_M_PM_CURRENT
);
643 env
->xl
= riscv_cpu_mxl(env
);
644 riscv_cpu_update_mask(env
);
645 cs
->exception_index
= RISCV_EXCP_NONE
;
647 set_default_nan_mode(1, &env
->fp_status
);
649 #ifndef CONFIG_USER_ONLY
650 if (cpu
->cfg
.debug
) {
651 riscv_trigger_init(env
);
655 kvm_riscv_reset_vcpu(cpu
);
660 static void riscv_cpu_disas_set_info(CPUState
*s
, disassemble_info
*info
)
662 RISCVCPU
*cpu
= RISCV_CPU(s
);
664 switch (riscv_cpu_mxl(&cpu
->env
)) {
666 info
->print_insn
= print_insn_riscv32
;
669 info
->print_insn
= print_insn_riscv64
;
672 info
->print_insn
= print_insn_riscv128
;
675 g_assert_not_reached();
680 * Check consistency between chosen extensions while setting
681 * cpu->cfg accordingly, doing a set_misa() in the end.
683 static void riscv_cpu_validate_set_extensions(RISCVCPU
*cpu
, Error
**errp
)
685 CPURISCVState
*env
= &cpu
->env
;
688 /* Do some ISA extension error checking */
689 if (cpu
->cfg
.ext_g
&& !(cpu
->cfg
.ext_i
&& cpu
->cfg
.ext_m
&&
690 cpu
->cfg
.ext_a
&& cpu
->cfg
.ext_f
&&
692 cpu
->cfg
.ext_icsr
&& cpu
->cfg
.ext_ifencei
)) {
693 warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
694 cpu
->cfg
.ext_i
= true;
695 cpu
->cfg
.ext_m
= true;
696 cpu
->cfg
.ext_a
= true;
697 cpu
->cfg
.ext_f
= true;
698 cpu
->cfg
.ext_d
= true;
699 cpu
->cfg
.ext_icsr
= true;
700 cpu
->cfg
.ext_ifencei
= true;
703 if (cpu
->cfg
.ext_i
&& cpu
->cfg
.ext_e
) {
705 "I and E extensions are incompatible");
709 if (!cpu
->cfg
.ext_i
&& !cpu
->cfg
.ext_e
) {
711 "Either I or E extension must be set");
715 if (cpu
->cfg
.ext_s
&& !cpu
->cfg
.ext_u
) {
717 "Setting S extension without U extension is illegal");
721 if (cpu
->cfg
.ext_h
&& !cpu
->cfg
.ext_i
) {
723 "H depends on an I base integer ISA with 32 x registers");
727 if (cpu
->cfg
.ext_h
&& !cpu
->cfg
.ext_s
) {
728 error_setg(errp
, "H extension implicitly requires S-mode");
732 if (cpu
->cfg
.ext_f
&& !cpu
->cfg
.ext_icsr
) {
733 error_setg(errp
, "F extension requires Zicsr");
737 if ((cpu
->cfg
.ext_zawrs
) && !cpu
->cfg
.ext_a
) {
738 error_setg(errp
, "Zawrs extension requires A extension");
742 if (cpu
->cfg
.ext_zfh
) {
743 cpu
->cfg
.ext_zfhmin
= true;
746 if (cpu
->cfg
.ext_zfhmin
&& !cpu
->cfg
.ext_f
) {
747 error_setg(errp
, "Zfh/Zfhmin extensions require F extension");
751 if (cpu
->cfg
.ext_d
&& !cpu
->cfg
.ext_f
) {
752 error_setg(errp
, "D extension requires F extension");
756 /* The V vector extension depends on the Zve64d extension */
757 if (cpu
->cfg
.ext_v
) {
758 cpu
->cfg
.ext_zve64d
= true;
761 /* The Zve64d extension depends on the Zve64f extension */
762 if (cpu
->cfg
.ext_zve64d
) {
763 cpu
->cfg
.ext_zve64f
= true;
766 /* The Zve64f extension depends on the Zve32f extension */
767 if (cpu
->cfg
.ext_zve64f
) {
768 cpu
->cfg
.ext_zve32f
= true;
771 if (cpu
->cfg
.ext_zve64d
&& !cpu
->cfg
.ext_d
) {
772 error_setg(errp
, "Zve64d/V extensions require D extension");
776 if (cpu
->cfg
.ext_zve32f
&& !cpu
->cfg
.ext_f
) {
777 error_setg(errp
, "Zve32f/Zve64f extensions require F extension");
781 if (cpu
->cfg
.ext_zvfh
) {
782 cpu
->cfg
.ext_zvfhmin
= true;
785 if (cpu
->cfg
.ext_zvfhmin
&& !cpu
->cfg
.ext_zve32f
) {
786 error_setg(errp
, "Zvfh/Zvfhmin extensions require Zve32f extension");
790 if (cpu
->cfg
.ext_zvfh
&& !cpu
->cfg
.ext_zfhmin
) {
791 error_setg(errp
, "Zvfh extensions requires Zfhmin extension");
795 /* Set the ISA extensions, checks should have happened above */
796 if (cpu
->cfg
.ext_zhinx
) {
797 cpu
->cfg
.ext_zhinxmin
= true;
800 if (cpu
->cfg
.ext_zdinx
|| cpu
->cfg
.ext_zhinxmin
) {
801 cpu
->cfg
.ext_zfinx
= true;
804 if (cpu
->cfg
.ext_zfinx
) {
805 if (!cpu
->cfg
.ext_icsr
) {
806 error_setg(errp
, "Zfinx extension requires Zicsr");
809 if (cpu
->cfg
.ext_f
) {
811 "Zfinx cannot be supported together with F extension");
816 if (cpu
->cfg
.ext_zk
) {
817 cpu
->cfg
.ext_zkn
= true;
818 cpu
->cfg
.ext_zkr
= true;
819 cpu
->cfg
.ext_zkt
= true;
822 if (cpu
->cfg
.ext_zkn
) {
823 cpu
->cfg
.ext_zbkb
= true;
824 cpu
->cfg
.ext_zbkc
= true;
825 cpu
->cfg
.ext_zbkx
= true;
826 cpu
->cfg
.ext_zkne
= true;
827 cpu
->cfg
.ext_zknd
= true;
828 cpu
->cfg
.ext_zknh
= true;
831 if (cpu
->cfg
.ext_zks
) {
832 cpu
->cfg
.ext_zbkb
= true;
833 cpu
->cfg
.ext_zbkc
= true;
834 cpu
->cfg
.ext_zbkx
= true;
835 cpu
->cfg
.ext_zksed
= true;
836 cpu
->cfg
.ext_zksh
= true;
839 if (cpu
->cfg
.ext_i
) {
842 if (cpu
->cfg
.ext_e
) {
845 if (cpu
->cfg
.ext_m
) {
848 if (cpu
->cfg
.ext_a
) {
851 if (cpu
->cfg
.ext_f
) {
854 if (cpu
->cfg
.ext_d
) {
857 if (cpu
->cfg
.ext_c
) {
860 if (cpu
->cfg
.ext_s
) {
863 if (cpu
->cfg
.ext_u
) {
866 if (cpu
->cfg
.ext_h
) {
869 if (cpu
->cfg
.ext_v
) {
870 int vext_version
= VEXT_VERSION_1_00_0
;
872 if (!is_power_of_2(cpu
->cfg
.vlen
)) {
874 "Vector extension VLEN must be power of 2");
877 if (cpu
->cfg
.vlen
> RV_VLEN_MAX
|| cpu
->cfg
.vlen
< 128) {
879 "Vector extension implementation only supports VLEN "
880 "in the range [128, %d]", RV_VLEN_MAX
);
883 if (!is_power_of_2(cpu
->cfg
.elen
)) {
885 "Vector extension ELEN must be power of 2");
888 if (cpu
->cfg
.elen
> 64 || cpu
->cfg
.elen
< 8) {
890 "Vector extension implementation only supports ELEN "
891 "in the range [8, 64]");
894 if (cpu
->cfg
.vext_spec
) {
895 if (!g_strcmp0(cpu
->cfg
.vext_spec
, "v1.0")) {
896 vext_version
= VEXT_VERSION_1_00_0
;
899 "Unsupported vector spec version '%s'",
904 qemu_log("vector version is not specified, "
905 "use the default value v1.0\n");
907 set_vext_version(env
, vext_version
);
909 if (cpu
->cfg
.ext_j
) {
913 set_misa(env
, env
->misa_mxl
, ext
);
916 static void riscv_cpu_realize(DeviceState
*dev
, Error
**errp
)
918 CPUState
*cs
= CPU(dev
);
919 RISCVCPU
*cpu
= RISCV_CPU(dev
);
920 CPURISCVState
*env
= &cpu
->env
;
921 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(dev
);
922 CPUClass
*cc
= CPU_CLASS(mcc
);
923 int i
, priv_version
= -1;
924 Error
*local_err
= NULL
;
926 cpu_exec_realizefn(cs
, &local_err
);
927 if (local_err
!= NULL
) {
928 error_propagate(errp
, local_err
);
932 if (cpu
->cfg
.priv_spec
) {
933 if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.12.0")) {
934 priv_version
= PRIV_VERSION_1_12_0
;
935 } else if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.11.0")) {
936 priv_version
= PRIV_VERSION_1_11_0
;
937 } else if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.10.0")) {
938 priv_version
= PRIV_VERSION_1_10_0
;
941 "Unsupported privilege spec version '%s'",
947 if (priv_version
>= PRIV_VERSION_1_10_0
) {
948 set_priv_version(env
, priv_version
);
951 /* Force disable extensions if priv spec version does not match */
952 for (i
= 0; i
< ARRAY_SIZE(isa_edata_arr
); i
++) {
953 if (isa_ext_is_enabled(cpu
, &isa_edata_arr
[i
]) &&
954 (env
->priv_ver
< isa_edata_arr
[i
].min_version
)) {
955 isa_ext_update_enabled(cpu
, &isa_edata_arr
[i
], false);
956 #ifndef CONFIG_USER_ONLY
957 warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
958 " because privilege spec version does not match",
959 isa_edata_arr
[i
].name
, env
->mhartid
);
961 warn_report("disabling %s extension because "
962 "privilege spec version does not match",
963 isa_edata_arr
[i
].name
);
968 if (cpu
->cfg
.epmp
&& !cpu
->cfg
.pmp
) {
970 * Enhanced PMP should only be available
971 * on harts with PMP support
973 error_setg(errp
, "Invalid configuration: EPMP requires PMP support");
978 #ifndef CONFIG_USER_ONLY
979 if (cpu
->cfg
.ext_sstc
) {
980 riscv_timer_init(cpu
);
982 #endif /* CONFIG_USER_ONLY */
984 /* Validate that MISA_MXL is set properly. */
985 switch (env
->misa_mxl_max
) {
986 #ifdef TARGET_RISCV64
989 cc
->gdb_core_xml_file
= "riscv-64bit-cpu.xml";
993 cc
->gdb_core_xml_file
= "riscv-32bit-cpu.xml";
996 g_assert_not_reached();
998 assert(env
->misa_mxl_max
== env
->misa_mxl
);
1000 riscv_cpu_validate_set_extensions(cpu
, &local_err
);
1001 if (local_err
!= NULL
) {
1002 error_propagate(errp
, local_err
);
1006 #ifndef CONFIG_USER_ONLY
1007 if (cpu
->cfg
.pmu_num
) {
1008 if (!riscv_pmu_init(cpu
, cpu
->cfg
.pmu_num
) && cpu
->cfg
.ext_sscofpmf
) {
1009 cpu
->pmu_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
1010 riscv_pmu_timer_cb
, cpu
);
1015 riscv_cpu_register_gdb_regs_for_features(cs
);
1020 mcc
->parent_realize(dev
, errp
);
1023 #ifndef CONFIG_USER_ONLY
1024 static void riscv_cpu_set_irq(void *opaque
, int irq
, int level
)
1026 RISCVCPU
*cpu
= RISCV_CPU(opaque
);
1027 CPURISCVState
*env
= &cpu
->env
;
1029 if (irq
< IRQ_LOCAL_MAX
) {
1042 if (kvm_enabled()) {
1043 kvm_riscv_set_irq(cpu
, irq
, level
);
1045 riscv_cpu_update_mip(cpu
, 1 << irq
, BOOL_TO_MASK(level
));
1049 if (kvm_enabled()) {
1050 kvm_riscv_set_irq(cpu
, irq
, level
);
1052 env
->external_seip
= level
;
1053 riscv_cpu_update_mip(cpu
, 1 << irq
,
1054 BOOL_TO_MASK(level
| env
->software_seip
));
1058 g_assert_not_reached();
1060 } else if (irq
< (IRQ_LOCAL_MAX
+ IRQ_LOCAL_GUEST_MAX
)) {
1061 /* Require H-extension for handling guest local interrupts */
1062 if (!riscv_has_ext(env
, RVH
)) {
1063 g_assert_not_reached();
1066 /* Compute bit position in HGEIP CSR */
1067 irq
= irq
- IRQ_LOCAL_MAX
+ 1;
1068 if (env
->geilen
< irq
) {
1069 g_assert_not_reached();
1072 /* Update HGEIP CSR */
1073 env
->hgeip
&= ~((target_ulong
)1 << irq
);
1075 env
->hgeip
|= (target_ulong
)1 << irq
;
1078 /* Update mip.SGEIP bit */
1079 riscv_cpu_update_mip(cpu
, MIP_SGEIP
,
1080 BOOL_TO_MASK(!!(env
->hgeie
& env
->hgeip
)));
1082 g_assert_not_reached();
1085 #endif /* CONFIG_USER_ONLY */
1087 static void riscv_cpu_init(Object
*obj
)
1089 RISCVCPU
*cpu
= RISCV_CPU(obj
);
1091 cpu
->cfg
.ext_ifencei
= true;
1092 cpu
->cfg
.ext_icsr
= true;
1093 cpu
->cfg
.mmu
= true;
1094 cpu
->cfg
.pmp
= true;
1096 cpu_set_cpustate_pointers(cpu
);
1098 #ifndef CONFIG_USER_ONLY
1099 qdev_init_gpio_in(DEVICE(cpu
), riscv_cpu_set_irq
,
1100 IRQ_LOCAL_MAX
+ IRQ_LOCAL_GUEST_MAX
);
1101 #endif /* CONFIG_USER_ONLY */
1104 static Property riscv_cpu_extensions
[] = {
1105 /* Defaults for standard extensions */
1106 DEFINE_PROP_BOOL("i", RISCVCPU
, cfg
.ext_i
, true),
1107 DEFINE_PROP_BOOL("e", RISCVCPU
, cfg
.ext_e
, false),
1108 DEFINE_PROP_BOOL("g", RISCVCPU
, cfg
.ext_g
, false),
1109 DEFINE_PROP_BOOL("m", RISCVCPU
, cfg
.ext_m
, true),
1110 DEFINE_PROP_BOOL("a", RISCVCPU
, cfg
.ext_a
, true),
1111 DEFINE_PROP_BOOL("f", RISCVCPU
, cfg
.ext_f
, true),
1112 DEFINE_PROP_BOOL("d", RISCVCPU
, cfg
.ext_d
, true),
1113 DEFINE_PROP_BOOL("c", RISCVCPU
, cfg
.ext_c
, true),
1114 DEFINE_PROP_BOOL("s", RISCVCPU
, cfg
.ext_s
, true),
1115 DEFINE_PROP_BOOL("u", RISCVCPU
, cfg
.ext_u
, true),
1116 DEFINE_PROP_BOOL("v", RISCVCPU
, cfg
.ext_v
, false),
1117 DEFINE_PROP_BOOL("h", RISCVCPU
, cfg
.ext_h
, true),
1118 DEFINE_PROP_UINT8("pmu-num", RISCVCPU
, cfg
.pmu_num
, 16),
1119 DEFINE_PROP_BOOL("sscofpmf", RISCVCPU
, cfg
.ext_sscofpmf
, false),
1120 DEFINE_PROP_BOOL("Zifencei", RISCVCPU
, cfg
.ext_ifencei
, true),
1121 DEFINE_PROP_BOOL("Zicsr", RISCVCPU
, cfg
.ext_icsr
, true),
1122 DEFINE_PROP_BOOL("Zihintpause", RISCVCPU
, cfg
.ext_zihintpause
, true),
1123 DEFINE_PROP_BOOL("Zawrs", RISCVCPU
, cfg
.ext_zawrs
, true),
1124 DEFINE_PROP_BOOL("Zfh", RISCVCPU
, cfg
.ext_zfh
, false),
1125 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU
, cfg
.ext_zfhmin
, false),
1126 DEFINE_PROP_BOOL("Zve32f", RISCVCPU
, cfg
.ext_zve32f
, false),
1127 DEFINE_PROP_BOOL("Zve64f", RISCVCPU
, cfg
.ext_zve64f
, false),
1128 DEFINE_PROP_BOOL("Zve64d", RISCVCPU
, cfg
.ext_zve64d
, false),
1129 DEFINE_PROP_BOOL("mmu", RISCVCPU
, cfg
.mmu
, true),
1130 DEFINE_PROP_BOOL("pmp", RISCVCPU
, cfg
.pmp
, true),
1131 DEFINE_PROP_BOOL("sstc", RISCVCPU
, cfg
.ext_sstc
, true),
1133 DEFINE_PROP_STRING("priv_spec", RISCVCPU
, cfg
.priv_spec
),
1134 DEFINE_PROP_STRING("vext_spec", RISCVCPU
, cfg
.vext_spec
),
1135 DEFINE_PROP_UINT16("vlen", RISCVCPU
, cfg
.vlen
, 128),
1136 DEFINE_PROP_UINT16("elen", RISCVCPU
, cfg
.elen
, 64),
1138 DEFINE_PROP_BOOL("svadu", RISCVCPU
, cfg
.ext_svadu
, true),
1140 DEFINE_PROP_BOOL("svinval", RISCVCPU
, cfg
.ext_svinval
, false),
1141 DEFINE_PROP_BOOL("svnapot", RISCVCPU
, cfg
.ext_svnapot
, false),
1142 DEFINE_PROP_BOOL("svpbmt", RISCVCPU
, cfg
.ext_svpbmt
, false),
1144 DEFINE_PROP_BOOL("zba", RISCVCPU
, cfg
.ext_zba
, true),
1145 DEFINE_PROP_BOOL("zbb", RISCVCPU
, cfg
.ext_zbb
, true),
1146 DEFINE_PROP_BOOL("zbc", RISCVCPU
, cfg
.ext_zbc
, true),
1147 DEFINE_PROP_BOOL("zbkb", RISCVCPU
, cfg
.ext_zbkb
, false),
1148 DEFINE_PROP_BOOL("zbkc", RISCVCPU
, cfg
.ext_zbkc
, false),
1149 DEFINE_PROP_BOOL("zbkx", RISCVCPU
, cfg
.ext_zbkx
, false),
1150 DEFINE_PROP_BOOL("zbs", RISCVCPU
, cfg
.ext_zbs
, true),
1151 DEFINE_PROP_BOOL("zk", RISCVCPU
, cfg
.ext_zk
, false),
1152 DEFINE_PROP_BOOL("zkn", RISCVCPU
, cfg
.ext_zkn
, false),
1153 DEFINE_PROP_BOOL("zknd", RISCVCPU
, cfg
.ext_zknd
, false),
1154 DEFINE_PROP_BOOL("zkne", RISCVCPU
, cfg
.ext_zkne
, false),
1155 DEFINE_PROP_BOOL("zknh", RISCVCPU
, cfg
.ext_zknh
, false),
1156 DEFINE_PROP_BOOL("zkr", RISCVCPU
, cfg
.ext_zkr
, false),
1157 DEFINE_PROP_BOOL("zks", RISCVCPU
, cfg
.ext_zks
, false),
1158 DEFINE_PROP_BOOL("zksed", RISCVCPU
, cfg
.ext_zksed
, false),
1159 DEFINE_PROP_BOOL("zksh", RISCVCPU
, cfg
.ext_zksh
, false),
1160 DEFINE_PROP_BOOL("zkt", RISCVCPU
, cfg
.ext_zkt
, false),
1162 DEFINE_PROP_BOOL("zdinx", RISCVCPU
, cfg
.ext_zdinx
, false),
1163 DEFINE_PROP_BOOL("zfinx", RISCVCPU
, cfg
.ext_zfinx
, false),
1164 DEFINE_PROP_BOOL("zhinx", RISCVCPU
, cfg
.ext_zhinx
, false),
1165 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU
, cfg
.ext_zhinxmin
, false),
1167 DEFINE_PROP_BOOL("zmmul", RISCVCPU
, cfg
.ext_zmmul
, false),
1169 /* Vendor-specific custom extensions */
1170 DEFINE_PROP_BOOL("xtheadba", RISCVCPU
, cfg
.ext_xtheadba
, false),
1171 DEFINE_PROP_BOOL("xtheadbb", RISCVCPU
, cfg
.ext_xtheadbb
, false),
1172 DEFINE_PROP_BOOL("xtheadbs", RISCVCPU
, cfg
.ext_xtheadbs
, false),
1173 DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU
, cfg
.ext_xtheadcmo
, false),
1174 DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU
, cfg
.ext_xtheadcondmov
, false),
1175 DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU
, cfg
.ext_xtheadfmemidx
, false),
1176 DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU
, cfg
.ext_xtheadfmv
, false),
1177 DEFINE_PROP_BOOL("xtheadmac", RISCVCPU
, cfg
.ext_xtheadmac
, false),
1178 DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU
, cfg
.ext_xtheadmemidx
, false),
1179 DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU
, cfg
.ext_xtheadmempair
, false),
1180 DEFINE_PROP_BOOL("xtheadsync", RISCVCPU
, cfg
.ext_xtheadsync
, false),
1181 DEFINE_PROP_BOOL("xventanacondops", RISCVCPU
, cfg
.ext_XVentanaCondOps
, false),
1183 /* These are experimental so mark with 'x-' */
1184 DEFINE_PROP_BOOL("x-zicond", RISCVCPU
, cfg
.ext_zicond
, false),
1185 DEFINE_PROP_BOOL("x-j", RISCVCPU
, cfg
.ext_j
, false),
1187 DEFINE_PROP_BOOL("x-epmp", RISCVCPU
, cfg
.epmp
, false),
1188 DEFINE_PROP_BOOL("x-smaia", RISCVCPU
, cfg
.ext_smaia
, false),
1189 DEFINE_PROP_BOOL("x-ssaia", RISCVCPU
, cfg
.ext_ssaia
, false),
1191 DEFINE_PROP_BOOL("x-zvfh", RISCVCPU
, cfg
.ext_zvfh
, false),
1192 DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU
, cfg
.ext_zvfhmin
, false),
1194 DEFINE_PROP_END_OF_LIST(),
1198 * Register CPU props based on env.misa_ext. If a non-zero
1199 * value was set, register only the required cpu->cfg.ext_*
1200 * properties and leave. env.misa_ext = 0 means that we want
1201 * all the default properties to be registered.
1203 static void register_cpu_props(DeviceState
*dev
)
1205 RISCVCPU
*cpu
= RISCV_CPU(OBJECT(dev
));
1206 uint32_t misa_ext
= cpu
->env
.misa_ext
;
1210 * If misa_ext is not zero, set cfg properties now to
1211 * allow them to be read during riscv_cpu_realize()
1214 if (cpu
->env
.misa_ext
!= 0) {
1215 cpu
->cfg
.ext_i
= misa_ext
& RVI
;
1216 cpu
->cfg
.ext_e
= misa_ext
& RVE
;
1217 cpu
->cfg
.ext_m
= misa_ext
& RVM
;
1218 cpu
->cfg
.ext_a
= misa_ext
& RVA
;
1219 cpu
->cfg
.ext_f
= misa_ext
& RVF
;
1220 cpu
->cfg
.ext_d
= misa_ext
& RVD
;
1221 cpu
->cfg
.ext_v
= misa_ext
& RVV
;
1222 cpu
->cfg
.ext_c
= misa_ext
& RVC
;
1223 cpu
->cfg
.ext_s
= misa_ext
& RVS
;
1224 cpu
->cfg
.ext_u
= misa_ext
& RVU
;
1225 cpu
->cfg
.ext_h
= misa_ext
& RVH
;
1226 cpu
->cfg
.ext_j
= misa_ext
& RVJ
;
1229 * We don't want to set the default riscv_cpu_extensions
1235 for (prop
= riscv_cpu_extensions
; prop
&& prop
->name
; prop
++) {
1236 qdev_property_add_static(dev
, prop
);
/* User-visible properties common to every RISC-V CPU model. */
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    /*
     * Machine-ID CSR values; marchid/mimpid default to an encoding of
     * the QEMU version (see RISCV_CPU_MARCHID/RISCV_CPU_MIMPID).
     */
    DEFINE_PROP_UINT32("mvendorid", RISCVCPU, cfg.mvendorid, 0),
    DEFINE_PROP_UINT64("marchid", RISCVCPU, cfg.marchid, RISCV_CPU_MARCHID),
    DEFINE_PROP_UINT64("mimpid", RISCVCPU, cfg.mimpid, RISCV_CPU_MIMPID),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    /* When set, riscv_isa_string() omits multi-letter extensions. */
    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Return the gdb architecture name matching this CPU's XLEN.
 * The caller owns (and must g_free()) the returned string.
 */
static gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return g_strdup("riscv:rv32");
    case MXL_RV64:
    case MXL_RV128:
        /* NOTE(review): gdb has no rv128 arch name; rv64 is reported. */
        return g_strdup("riscv:rv64");
    default:
        g_assert_not_reached();
    }
}
/*
 * Hand gdb the dynamically generated XML register description it
 * asked for by name, or NULL when the name is not one we provide.
 */
static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}
#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

/* System-emulation-only hooks: debug page walks, core notes, migration. */
static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif
#include "hw/core/tcg-cpu-ops.h"

/* TCG hooks; the fault/debug entries only exist in system emulation. */
static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};
/*
 * QOM class initializer: chain the device realize/reset handlers to
 * the parent class, install the CPUClass hooks (state dump, pc access,
 * gdb support, TCG/sysemu op tables) and register the properties.
 */
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    /* 33 = the standard riscv gdb core set: 32 GPRs plus pc */
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    device_class_set_props(dc, riscv_cpu_properties);
}
/*
 * Append "_<name>" to *isa_str for every enabled multi-letter
 * extension, growing the string with g_strconcat() and freeing the
 * previous buffer each time.  On return *isa_str points at the
 * (possibly reallocated) result.  max_str_len is currently unused.
 */
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, int max_str_len)
{
    char *old = *isa_str;
    char *new = *isa_str;
    int i;

    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_edata_arr[i].multi_letter &&
            isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
            new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}
1374 char *riscv_isa_string(RISCVCPU
*cpu
)
1377 const size_t maxlen
= sizeof("rv128") + sizeof(riscv_single_letter_exts
);
1378 char *isa_str
= g_new(char, maxlen
);
1379 char *p
= isa_str
+ snprintf(isa_str
, maxlen
, "rv%d", TARGET_LONG_BITS
);
1380 for (i
= 0; i
< sizeof(riscv_single_letter_exts
) - 1; i
++) {
1381 if (cpu
->env
.misa_ext
& RV(riscv_single_letter_exts
[i
])) {
1382 *p
++ = qemu_tolower(riscv_single_letter_exts
[i
]);
1386 if (!cpu
->cfg
.short_isa_string
) {
1387 riscv_isa_string_ext(cpu
, &isa_str
, maxlen
);
1392 static gint
riscv_cpu_list_compare(gconstpointer a
, gconstpointer b
)
1394 ObjectClass
*class_a
= (ObjectClass
*)a
;
1395 ObjectClass
*class_b
= (ObjectClass
*)b
;
1396 const char *name_a
, *name_b
;
1398 name_a
= object_class_get_name(class_a
);
1399 name_b
= object_class_get_name(class_b
);
1400 return strcmp(name_a
, name_b
);
1403 static void riscv_cpu_list_entry(gpointer data
, gpointer user_data
)
1405 const char *typename
= object_class_get_name(OBJECT_CLASS(data
));
1406 int len
= strlen(typename
) - strlen(RISCV_CPU_TYPE_SUFFIX
);
1408 qemu_printf("%.*s\n", len
, typename
);
/* Print the names of all registered RISC-V CPU models, sorted. */
void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}
/* Build one TypeInfo entry for a concrete CPU model. */
#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

static const TypeInfo riscv_cpu_type_infos[] = {
    {
        /* Abstract base type every concrete model below derives from. */
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    DEFINE_CPU(TYPE_RISCV_CPU_ANY,              riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST,             riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_CPU(TYPE_RISCV_CPU_BASE32,           rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX,             rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31,       rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34,       rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34,       rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_CPU(TYPE_RISCV_CPU_BASE64,           rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51,       rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54,       rv64_sifive_u_cpu_init),
    /* NOTE(review): Shakti C reuses the SiFive U init — confirm intended. */
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C,         rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906,       rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_BASE128,          rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)
)