/* target/riscv/cpu.c */
1 /*
2 * QEMU RISC-V CPU
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "pmu.h"
27 #include "internals.h"
28 #include "time_helper.h"
29 #include "exec/exec-all.h"
30 #include "qapi/error.h"
31 #include "qapi/visitor.h"
32 #include "qemu/error-report.h"
33 #include "hw/qdev-properties.h"
34 #include "migration/vmstate.h"
35 #include "fpu/softfloat-helpers.h"
36 #include "sysemu/kvm.h"
37 #include "kvm_riscv.h"
38 #include "tcg/tcg.h"
39
/* RISC-V CPU definitions */

/*
 * marchid/mimpid default values: encode the QEMU version so guests can
 * identify the emulator release they run on.
 */
#define RISCV_CPU_MARCHID ((QEMU_VERSION_MAJOR << 16) | \
                           (QEMU_VERSION_MINOR << 8)  | \
                           (QEMU_VERSION_MICRO))
#define RISCV_CPU_MIMPID RISCV_CPU_MARCHID

/* Canonical ordering of the single-letter extensions in an ISA string. */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
48
/*
 * Per-extension metadata used when generating the ISA string and when
 * checking an extension against the CPU's privileged-spec version.
 */
struct isa_ext_data {
    const char *name;        /* name as it appears in the ISA string */
    bool multi_letter;       /* true for multi-letter (Z/S/X-prefixed) exts */
    int min_version;         /* minimum priv spec version (PRIV_VERSION_*) */
    int ext_enable_offset;   /* offsetof() the bool enable flag in RISCVCPUConfig */
};

/* Build one isa_ext_data entry; _prop is the RISCVCPUConfig field name. */
#define ISA_EXT_DATA_ENTRY(_name, _m_letter, _min_ver, _prop) \
{#_name, _m_letter, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}
58
59 /**
60 * Here are the ordering rules of extension naming defined by RISC-V
61 * specification :
62 * 1. All extensions should be separated from other multi-letter extensions
63 * by an underscore.
64 * 2. The first letter following the 'Z' conventionally indicates the most
65 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
66 * If multiple 'Z' extensions are named, they should be ordered first
67 * by category, then alphabetically within a category.
68 * 3. Standard supervisor-level extensions (starts with 'S') should be
69 * listed after standard unprivileged extensions. If multiple
70 * supervisor-level extensions are listed, they should be ordered
71 * alphabetically.
72 * 4. Non-standard extensions (starts with 'X') must be listed after all
73 * standard extensions. They must be separated from other multi-letter
74 * extensions by an underscore.
75 */
/* Extension table; entries follow the naming/ordering rules documented above. */
static const struct isa_ext_data isa_edata_arr[] = {
    ISA_EXT_DATA_ENTRY(h, false, PRIV_VERSION_1_12_0, ext_h),
    ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_10_0, ext_v),
    ISA_EXT_DATA_ENTRY(zicbom, true, PRIV_VERSION_1_12_0, ext_icbom),
    ISA_EXT_DATA_ENTRY(zicboz, true, PRIV_VERSION_1_12_0, ext_icboz),
    ISA_EXT_DATA_ENTRY(zicond, true, PRIV_VERSION_1_12_0, ext_zicond),
    ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr),
    ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei),
    ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause),
    ISA_EXT_DATA_ENTRY(zawrs, true, PRIV_VERSION_1_12_0, ext_zawrs),
    ISA_EXT_DATA_ENTRY(zfh, true, PRIV_VERSION_1_11_0, ext_zfh),
    ISA_EXT_DATA_ENTRY(zfhmin, true, PRIV_VERSION_1_12_0, ext_zfhmin),
    ISA_EXT_DATA_ENTRY(zfinx, true, PRIV_VERSION_1_12_0, ext_zfinx),
    ISA_EXT_DATA_ENTRY(zdinx, true, PRIV_VERSION_1_12_0, ext_zdinx),
    ISA_EXT_DATA_ENTRY(zba, true, PRIV_VERSION_1_12_0, ext_zba),
    ISA_EXT_DATA_ENTRY(zbb, true, PRIV_VERSION_1_12_0, ext_zbb),
    ISA_EXT_DATA_ENTRY(zbc, true, PRIV_VERSION_1_12_0, ext_zbc),
    ISA_EXT_DATA_ENTRY(zbkb, true, PRIV_VERSION_1_12_0, ext_zbkb),
    ISA_EXT_DATA_ENTRY(zbkc, true, PRIV_VERSION_1_12_0, ext_zbkc),
    ISA_EXT_DATA_ENTRY(zbkx, true, PRIV_VERSION_1_12_0, ext_zbkx),
    ISA_EXT_DATA_ENTRY(zbs, true, PRIV_VERSION_1_12_0, ext_zbs),
    ISA_EXT_DATA_ENTRY(zk, true, PRIV_VERSION_1_12_0, ext_zk),
    ISA_EXT_DATA_ENTRY(zkn, true, PRIV_VERSION_1_12_0, ext_zkn),
    ISA_EXT_DATA_ENTRY(zknd, true, PRIV_VERSION_1_12_0, ext_zknd),
    ISA_EXT_DATA_ENTRY(zkne, true, PRIV_VERSION_1_12_0, ext_zkne),
    ISA_EXT_DATA_ENTRY(zknh, true, PRIV_VERSION_1_12_0, ext_zknh),
    ISA_EXT_DATA_ENTRY(zkr, true, PRIV_VERSION_1_12_0, ext_zkr),
    ISA_EXT_DATA_ENTRY(zks, true, PRIV_VERSION_1_12_0, ext_zks),
    ISA_EXT_DATA_ENTRY(zksed, true, PRIV_VERSION_1_12_0, ext_zksed),
    ISA_EXT_DATA_ENTRY(zksh, true, PRIV_VERSION_1_12_0, ext_zksh),
    ISA_EXT_DATA_ENTRY(zkt, true, PRIV_VERSION_1_12_0, ext_zkt),
    ISA_EXT_DATA_ENTRY(zve32f, true, PRIV_VERSION_1_12_0, ext_zve32f),
    ISA_EXT_DATA_ENTRY(zve64f, true, PRIV_VERSION_1_12_0, ext_zve64f),
    ISA_EXT_DATA_ENTRY(zve64d, true, PRIV_VERSION_1_12_0, ext_zve64d),
    ISA_EXT_DATA_ENTRY(zvfh, true, PRIV_VERSION_1_12_0, ext_zvfh),
    ISA_EXT_DATA_ENTRY(zvfhmin, true, PRIV_VERSION_1_12_0, ext_zvfhmin),
    ISA_EXT_DATA_ENTRY(zhinx, true, PRIV_VERSION_1_12_0, ext_zhinx),
    ISA_EXT_DATA_ENTRY(zhinxmin, true, PRIV_VERSION_1_12_0, ext_zhinxmin),
    ISA_EXT_DATA_ENTRY(smaia, true, PRIV_VERSION_1_12_0, ext_smaia),
    ISA_EXT_DATA_ENTRY(ssaia, true, PRIV_VERSION_1_12_0, ext_ssaia),
    ISA_EXT_DATA_ENTRY(sscofpmf, true, PRIV_VERSION_1_12_0, ext_sscofpmf),
    ISA_EXT_DATA_ENTRY(sstc, true, PRIV_VERSION_1_12_0, ext_sstc),
    ISA_EXT_DATA_ENTRY(svadu, true, PRIV_VERSION_1_12_0, ext_svadu),
    ISA_EXT_DATA_ENTRY(svinval, true, PRIV_VERSION_1_12_0, ext_svinval),
    ISA_EXT_DATA_ENTRY(svnapot, true, PRIV_VERSION_1_12_0, ext_svnapot),
    ISA_EXT_DATA_ENTRY(svpbmt, true, PRIV_VERSION_1_12_0, ext_svpbmt),
    ISA_EXT_DATA_ENTRY(xtheadba, true, PRIV_VERSION_1_11_0, ext_xtheadba),
    ISA_EXT_DATA_ENTRY(xtheadbb, true, PRIV_VERSION_1_11_0, ext_xtheadbb),
    ISA_EXT_DATA_ENTRY(xtheadbs, true, PRIV_VERSION_1_11_0, ext_xtheadbs),
    ISA_EXT_DATA_ENTRY(xtheadcmo, true, PRIV_VERSION_1_11_0, ext_xtheadcmo),
    ISA_EXT_DATA_ENTRY(xtheadcondmov, true, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
    ISA_EXT_DATA_ENTRY(xtheadfmemidx, true, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
    ISA_EXT_DATA_ENTRY(xtheadfmv, true, PRIV_VERSION_1_11_0, ext_xtheadfmv),
    ISA_EXT_DATA_ENTRY(xtheadmac, true, PRIV_VERSION_1_11_0, ext_xtheadmac),
    ISA_EXT_DATA_ENTRY(xtheadmemidx, true, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
    ISA_EXT_DATA_ENTRY(xtheadmempair, true, PRIV_VERSION_1_11_0, ext_xtheadmempair),
    ISA_EXT_DATA_ENTRY(xtheadsync, true, PRIV_VERSION_1_11_0, ext_xtheadsync),
    ISA_EXT_DATA_ENTRY(xventanacondops, true, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
};
135
136 static bool isa_ext_is_enabled(RISCVCPU *cpu,
137 const struct isa_ext_data *edata)
138 {
139 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
140
141 return *ext_enabled;
142 }
143
144 static void isa_ext_update_enabled(RISCVCPU *cpu,
145 const struct isa_ext_data *edata, bool en)
146 {
147 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
148
149 *ext_enabled = en;
150 }
151
/* Integer register names: "xN/ABI-name", indexed by register number. */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
    "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
    "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
    "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3", "x29/t4", "x30/t5", "x31/t6"
};
159
/* High-half counterparts of the integer register names (not used in this chunk). */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
    "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
    "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
    "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
    "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h", "x31h/t6h"
};
168
/* Floating-point register names: "fN/ABI-name", indexed by register number. */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
    "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
    "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
    "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
    "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};
177
/*
 * Synchronous exception names, indexed by cause value
 * (see riscv_cpu_get_trap_name() with async == false).
 */
static const char * const riscv_excp_names[] = {
    "misaligned_fetch",
    "fault_fetch",
    "illegal_instruction",
    "breakpoint",
    "misaligned_load",
    "fault_load",
    "misaligned_store",
    "fault_store",
    "user_ecall",
    "supervisor_ecall",
    "hypervisor_ecall",
    "machine_ecall",
    "exec_page_fault",
    "load_page_fault",
    "reserved",
    "store_page_fault",
    "reserved",
    "reserved",
    "reserved",
    "reserved",
    "guest_exec_page_fault",
    "guest_load_page_fault",
    "reserved",
    "guest_store_page_fault",
};
204
/*
 * Interrupt names, indexed by cause value
 * (see riscv_cpu_get_trap_name() with async == true).
 */
static const char * const riscv_intr_names[] = {
    "u_software",
    "s_software",
    "vs_software",
    "m_software",
    "u_timer",
    "s_timer",
    "vs_timer",
    "m_timer",
    "u_external",
    "s_external",
    "vs_external",
    "m_external",
    "reserved",
    "reserved",
    "reserved",
    "reserved"
};
223
224 static void register_cpu_props(Object *obj);
225
226 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
227 {
228 if (async) {
229 return (cause < ARRAY_SIZE(riscv_intr_names)) ?
230 riscv_intr_names[cause] : "(unknown)";
231 } else {
232 return (cause < ARRAY_SIZE(riscv_excp_names)) ?
233 riscv_excp_names[cause] : "(unknown)";
234 }
235 }
236
237 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
238 {
239 env->misa_mxl_max = env->misa_mxl = mxl;
240 env->misa_ext_mask = env->misa_ext = ext;
241 }
242
/* Record the privileged-spec version (PRIV_VERSION_*) implemented by the CPU. */
static void set_priv_version(CPURISCVState *env, int priv_ver)
{
    env->priv_ver = priv_ver;
}
247
/* Record the vector-extension spec version (VEXT_VERSION_*) of the CPU. */
static void set_vext_version(CPURISCVState *env, int vext_ver)
{
    env->vext_ver = vext_ver;
}
252
253 #ifndef CONFIG_USER_ONLY
254 static uint8_t satp_mode_from_str(const char *satp_mode_str)
255 {
256 if (!strncmp(satp_mode_str, "mbare", 5)) {
257 return VM_1_10_MBARE;
258 }
259
260 if (!strncmp(satp_mode_str, "sv32", 4)) {
261 return VM_1_10_SV32;
262 }
263
264 if (!strncmp(satp_mode_str, "sv39", 4)) {
265 return VM_1_10_SV39;
266 }
267
268 if (!strncmp(satp_mode_str, "sv48", 4)) {
269 return VM_1_10_SV48;
270 }
271
272 if (!strncmp(satp_mode_str, "sv57", 4)) {
273 return VM_1_10_SV57;
274 }
275
276 if (!strncmp(satp_mode_str, "sv64", 4)) {
277 return VM_1_10_SV64;
278 }
279
280 g_assert_not_reached();
281 }
282
283 uint8_t satp_mode_max_from_map(uint32_t map)
284 {
285 /* map here has at least one bit set, so no problem with clz */
286 return 31 - __builtin_clz(map);
287 }
288
289 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
290 {
291 if (is_32_bit) {
292 switch (satp_mode) {
293 case VM_1_10_SV32:
294 return "sv32";
295 case VM_1_10_MBARE:
296 return "none";
297 }
298 } else {
299 switch (satp_mode) {
300 case VM_1_10_SV64:
301 return "sv64";
302 case VM_1_10_SV57:
303 return "sv57";
304 case VM_1_10_SV48:
305 return "sv48";
306 case VM_1_10_SV39:
307 return "sv39";
308 case VM_1_10_MBARE:
309 return "none";
310 }
311 }
312
313 g_assert_not_reached();
314 }
315
/*
 * Mark every satp mode up to and including @satp_mode as supported,
 * filtered through the per-XLEN validity tables.
 */
static void set_satp_mode_max_supported(RISCVCPU *cpu,
                                        uint8_t satp_mode)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;

    for (int i = 0; i <= satp_mode; ++i) {
        if (valid_vm[i]) {
            cpu->cfg.satp_mode.supported |= (1 << i);
        }
    }
}
328
/* Set the satp mode to the max supported */
static void set_satp_mode_default_map(RISCVCPU *cpu)
{
    /* Default: enable every mode the CPU reported as supported. */
    cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
}
334 #endif
335
/* Init for the generic "any" CPU model: IMAFDCU base, priv spec 1.12. */
static void riscv_any_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
#endif

#ifndef CONFIG_USER_ONLY
    /* sv32 is the only valid upper bound on RV32; RV64 goes up to sv57. */
    set_satp_mode_max_supported(RISCV_CPU(obj),
        riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
        VM_1_10_SV32 : VM_1_10_SV57);
#endif

    set_priv_version(env, PRIV_VERSION_1_12_0);
    register_cpu_props(obj);
}
354
355 #if defined(TARGET_RISCV64)
/* Init for the bare "rv64" model; extensions are selected via properties. */
static void rv64_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV64, 0);
    register_cpu_props(obj);
    /* Set latest version of privileged specification */
    set_priv_version(env, PRIV_VERSION_1_12_0);
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
368
/* Init for the SiFive U54-like model: RV64GC with S/U modes, priv 1.10. */
static void rv64_sifive_u_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    register_cpu_props(obj);
    set_priv_version(env, PRIV_VERSION_1_10_0);
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
#endif
}
379
/* Init for the SiFive E-series-like model: RV64IMACU, no MMU. */
static void rv64_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
    register_cpu_props(obj);
    set_priv_version(env, PRIV_VERSION_1_10_0);
    cpu->cfg.mmu = false;
#ifndef CONFIG_USER_ONLY
    /* No MMU: bare translation only. */
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
}
393
/* Init for the T-Head C906: RV64GC + Zfh plus the XThead vendor extensions. */
static void rv64_thead_c906_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    set_priv_version(env, PRIV_VERSION_1_11_0);

    cpu->cfg.ext_g = true;
    cpu->cfg.ext_c = true;
    cpu->cfg.ext_u = true;
    cpu->cfg.ext_s = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.ext_zfh = true;
    cpu->cfg.mmu = true;
    /* Vendor-specific XThead extensions implemented by this core. */
    cpu->cfg.ext_xtheadba = true;
    cpu->cfg.ext_xtheadbb = true;
    cpu->cfg.ext_xtheadbs = true;
    cpu->cfg.ext_xtheadcmo = true;
    cpu->cfg.ext_xtheadcondmov = true;
    cpu->cfg.ext_xtheadfmemidx = true;
    cpu->cfg.ext_xtheadmac = true;
    cpu->cfg.ext_xtheadmemidx = true;
    cpu->cfg.ext_xtheadmempair = true;
    cpu->cfg.ext_xtheadsync = true;

    cpu->cfg.mvendorid = THEAD_VENDOR_ID;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_SV39);
#endif
}
425
/* Init for the experimental rv128 model; requires single-threaded TCG. */
static void rv128_base_cpu_init(Object *obj)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* Missing 128-bit aligned atomics */
        error_report("128-bit RISC-V currently does not work with Multi "
                     "Threaded TCG. Please use: -accel tcg,thread=single");
        exit(EXIT_FAILURE);
    }
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV128, 0);
    register_cpu_props(obj);
    /* Set latest version of privileged specification */
    set_priv_version(env, PRIV_VERSION_1_12_0);
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
#endif
}
444 #else
/* Init for the bare "rv32" model; extensions are selected via properties. */
static void rv32_base_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    /* We set this in the realise function */
    set_misa(env, MXL_RV32, 0);
    register_cpu_props(obj);
    /* Set latest version of privileged specification */
    set_priv_version(env, PRIV_VERSION_1_12_0);
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}
457
/* Init for the 32-bit SiFive U-series-like model: RV32GC + S/U, priv 1.10. */
static void rv32_sifive_u_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
    register_cpu_props(obj);
    set_priv_version(env, PRIV_VERSION_1_10_0);
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
#endif
}
468
/* Init for the 32-bit SiFive E-series-like model: RV32IMACU, no MMU. */
static void rv32_sifive_e_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
    register_cpu_props(obj);
    set_priv_version(env, PRIV_VERSION_1_10_0);
    cpu->cfg.mmu = false;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
}
482
/* Init for the lowRISC Ibex model: RV32IMCU, no MMU, ePMP enabled. */
static void rv32_ibex_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
    register_cpu_props(obj);
    set_priv_version(env, PRIV_VERSION_1_11_0);
    cpu->cfg.mmu = false;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
    cpu->cfg.epmp = true;
}
497
/* Init for a generic RV32IMAFCU MMU-less model. */
static void rv32_imafcu_nommu_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
    RISCVCPU *cpu = RISCV_CPU(obj);

    set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
    register_cpu_props(obj);
    set_priv_version(env, PRIV_VERSION_1_10_0);
    cpu->cfg.mmu = false;
#ifndef CONFIG_USER_ONLY
    set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
#endif
}
511 #endif
512
513 #if defined(CONFIG_KVM)
/* Init for the KVM "host" model: misa extensions come from the host vCPU. */
static void riscv_host_cpu_init(Object *obj)
{
    CPURISCVState *env = &RISCV_CPU(obj)->env;
#if defined(TARGET_RISCV32)
    set_misa(env, MXL_RV32, 0);
#elif defined(TARGET_RISCV64)
    set_misa(env, MXL_RV64, 0);
#endif
    register_cpu_props(obj);
}
524 #endif
525
/*
 * Resolve a -cpu model name to its QOM class, or NULL if the name does
 * not map to a concrete RISC-V CPU class.
 */
static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;

    /*
     * NOTE(review): g_strsplit() with max_tokens == 1 performs no split,
     * so cpuname[0] is the whole cpu_model string — presumably callers
     * pass a bare model name with options already stripped; confirm.
     */
    cpuname = g_strsplit(cpu_model, ",", 1);
    typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    /* Reject unknown, non-RISC-V, and abstract classes. */
    if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}
543
/*
 * Dump CPU state to @f: virtualization flag, pc, a selection of CSRs
 * (system emulation only), the integer registers, and optionally
 * (CPU_DUMP_FPU) the floating-point registers.
 */
static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    int i;

#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVH)) {
        qemu_fprintf(f, " %s %d\n", "V      =  ", riscv_cpu_virt_enabled(env));
    }
#endif
    qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc      ", env->pc);
#ifndef CONFIG_USER_ONLY
    {
        static const int dump_csrs[] = {
            CSR_MHARTID,
            CSR_MSTATUS,
            CSR_MSTATUSH,
            /*
             * CSR_SSTATUS is intentionally omitted here as its value
             * can be figured out by looking at CSR_MSTATUS
             */
            CSR_HSTATUS,
            CSR_VSSTATUS,
            CSR_MIP,
            CSR_MIE,
            CSR_MIDELEG,
            CSR_HIDELEG,
            CSR_MEDELEG,
            CSR_HEDELEG,
            CSR_MTVEC,
            CSR_STVEC,
            CSR_VSTVEC,
            CSR_MEPC,
            CSR_SEPC,
            CSR_VSEPC,
            CSR_MCAUSE,
            CSR_SCAUSE,
            CSR_VSCAUSE,
            CSR_MTVAL,
            CSR_STVAL,
            CSR_HTVAL,
            CSR_MTVAL2,
            CSR_MSCRATCH,
            CSR_SSCRATCH,
            CSR_SATP,
            CSR_MMTE,
            CSR_UPMBASE,
            CSR_UPMMASK,
            CSR_SPMBASE,
            CSR_SPMMASK,
            CSR_MPMBASE,
            CSR_MPMMASK,
        };

        for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
            int csrno = dump_csrs[i];
            target_ulong val = 0;
            RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);

            /*
             * Rely on the smode, hmode, etc, predicates within csr.c
             * to do the filtering of the registers that are present.
             */
            if (res == RISCV_EXCP_NONE) {
                qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
                             csr_ops[csrno].name, val);
            }
        }
    }
#endif

    /* Integer registers, four per output line. */
    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
                     riscv_int_regnames[i], env->gpr[i]);
        if ((i & 3) == 3) {
            qemu_fprintf(f, "\n");
        }
    }
    if (flags & CPU_DUMP_FPU) {
        /* Floating-point registers, four per output line. */
        for (i = 0; i < 32; i++) {
            qemu_fprintf(f, " %-8s %016" PRIx64,
                         riscv_fpr_regnames[i], env->fpr[i]);
            if ((i & 3) == 3) {
                qemu_fprintf(f, "\n");
            }
        }
    }
}
633
/*
 * Set the program counter.  For RV32 the value is narrowed to 32 bits
 * (cast through int32_t) before being stored in the target_ulong pc.
 */
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (env->xl == MXL_RV32) {
        env->pc = (int32_t)value;
    } else {
        env->pc = value;
    }
}
645
646 static vaddr riscv_cpu_get_pc(CPUState *cs)
647 {
648 RISCVCPU *cpu = RISCV_CPU(cs);
649 CPURISCVState *env = &cpu->env;
650
651 /* Match cpu_get_tb_cpu_state. */
652 if (env->xl == MXL_RV32) {
653 return env->pc & UINT32_MAX;
654 }
655 return env->pc;
656 }
657
/*
 * Restore env->pc from a translation block, applying RV32 sign-narrowing.
 * Only valid without PC-relative translation (asserted below).
 */
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

    tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

    if (xl == MXL_RV32) {
        env->pc = (int32_t) tb->pc;
    } else {
        env->pc = tb->pc;
    }
}
673
/* Return whether the CPU has pending work (i.e. should leave WFI/halt). */
static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    /*
     * Definition of the WFI instruction requires it to ignore the privilege
     * mode and delegation registers, but respect individual enables
     */
    return riscv_cpu_all_pending(env) != 0;
#else
    /* User-mode emulation: always runnable. */
    return true;
#endif
}
688
/*
 * Restore interpreter state from unwind data: data[0] is the pc
 * (narrowed for RV32), data[1] the faulting instruction bits.
 */
static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

    if (xl == MXL_RV32) {
        env->pc = (int32_t)data[0];
    } else {
        env->pc = data[0];
    }
    env->bins = data[1];
}
704
/*
 * Reset-phase "hold" handler: put the CPU into its architectural reset
 * state (M-mode, pc at resetvec, default interrupt priorities, etc.)
 * after chaining to the parent class's hold phase.
 */
static void riscv_cpu_reset_hold(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    uint8_t iprio;
    int i, irq, rdzero;
#endif
    CPUState *cs = CPU(obj);
    RISCVCPU *cpu = RISCV_CPU(cs);
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;

    if (mcc->parent_phases.hold) {
        mcc->parent_phases.hold(obj);
    }
#ifndef CONFIG_USER_ONLY
    env->misa_mxl = env->misa_mxl_max;
    env->priv = PRV_M;
    env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
    if (env->misa_mxl > MXL_RV32) {
        /*
         * The reset status of SXL/UXL is undefined, but mstatus is WARL
         * and we must ensure that the value after init is valid for read.
         */
        env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
        env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
        if (riscv_has_ext(env, RVH)) {
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_SXL, env->misa_mxl);
            env->vsstatus = set_field(env->vsstatus,
                                      MSTATUS64_UXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_SXL, env->misa_mxl);
            env->mstatus_hs = set_field(env->mstatus_hs,
                                        MSTATUS64_UXL, env->misa_mxl);
        }
    }
    env->mcause = 0;
    env->miclaim = MIP_SGEIP;
    env->pc = env->resetvec;
    env->bins = 0;
    env->two_stage_lookup = false;

    /* envcfg enable bits depend on which extensions this CPU was built with. */
    env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
    env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
                   (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);

    /* Initialized default priorities of local interrupts. */
    for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
        iprio = riscv_cpu_default_priority(i);
        env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
        env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
        env->hviprio[i] = 0;
    }
    i = 0;
    while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
        if (!rdzero) {
            env->hviprio[irq] = env->miprio[irq];
        }
        i++;
    }
    /* mmte is supposed to have pm.current hardwired to 1 */
    env->mmte |= (PM_EXT_INITIAL | MMTE_M_PM_CURRENT);
#endif
    env->xl = riscv_cpu_mxl(env);
    riscv_cpu_update_mask(env);
    cs->exception_index = RISCV_EXCP_NONE;
    env->load_res = -1;
    set_default_nan_mode(1, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.debug) {
        riscv_trigger_init(env);
    }

    if (kvm_enabled()) {
        kvm_riscv_reset_vcpu(cpu);
    }
#endif
}
785
/* Select the disassembler matching the CPU's maximum XLEN. */
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
    RISCVCPU *cpu = RISCV_CPU(s);

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        info->print_insn = print_insn_riscv32;
        break;
    case MXL_RV64:
        info->print_insn = print_insn_riscv64;
        break;
    case MXL_RV128:
        info->print_insn = print_insn_riscv128;
        break;
    default:
        g_assert_not_reached();
    }
}
804
805 /*
806 * Check consistency between chosen extensions while setting
807 * cpu->cfg accordingly, doing a set_misa() in the end.
808 */
static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    uint32_t ext = 0;

    /* Do some ISA extension error checking */
    /* G is shorthand for IMAFD + Zicsr + Zifencei: force them all on. */
    if (cpu->cfg.ext_g && !(cpu->cfg.ext_i && cpu->cfg.ext_m &&
                            cpu->cfg.ext_a && cpu->cfg.ext_f &&
                            cpu->cfg.ext_d &&
                            cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
        warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
        cpu->cfg.ext_i = true;
        cpu->cfg.ext_m = true;
        cpu->cfg.ext_a = true;
        cpu->cfg.ext_f = true;
        cpu->cfg.ext_d = true;
        cpu->cfg.ext_icsr = true;
        cpu->cfg.ext_ifencei = true;
    }

    /* Exactly one of the I and E base ISAs must be selected. */
    if (cpu->cfg.ext_i && cpu->cfg.ext_e) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!cpu->cfg.ext_i && !cpu->cfg.ext_e) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (cpu->cfg.ext_s && !cpu->cfg.ext_u) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (cpu->cfg.ext_h && !cpu->cfg.ext_i) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (cpu->cfg.ext_h && !cpu->cfg.ext_s) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (cpu->cfg.ext_f && !cpu->cfg.ext_icsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !cpu->cfg.ext_a) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    /* Zfh implies Zfhmin; dependency checks below use the implied flag. */
    if (cpu->cfg.ext_zfh) {
        cpu->cfg.ext_zfhmin = true;
    }

    if (cpu->cfg.ext_zfhmin && !cpu->cfg.ext_f) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_d && !cpu->cfg.ext_f) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    /* The V vector extension depends on the Zve64d extension */
    if (cpu->cfg.ext_v) {
        cpu->cfg.ext_zve64d = true;
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu->cfg.ext_zve64f = true;
    }

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu->cfg.ext_zve32f = true;
    }

    if (cpu->cfg.ext_zve64d && !cpu->cfg.ext_d) {
        error_setg(errp, "Zve64d/V extensions require D extension");
        return;
    }

    if (cpu->cfg.ext_zve32f && !cpu->cfg.ext_f) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
        return;
    }

    /* Zvfh implies Zvfhmin. */
    if (cpu->cfg.ext_zvfh) {
        cpu->cfg.ext_zvfhmin = true;
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extensions requires Zfhmin extension");
        return;
    }

    /* Set the ISA extensions, checks should have happened above */
    if (cpu->cfg.ext_zhinx) {
        cpu->cfg.ext_zhinxmin = true;
    }

    if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) {
        cpu->cfg.ext_zfinx = true;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_icsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        /* Zfinx reuses integer registers for FP: mutually exclusive with F. */
        if (cpu->cfg.ext_f) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    /* Zk/Zkn/Zks are umbrella crypto extensions: expand to their members. */
    if (cpu->cfg.ext_zk) {
        cpu->cfg.ext_zkn = true;
        cpu->cfg.ext_zkr = true;
        cpu->cfg.ext_zkt = true;
    }

    if (cpu->cfg.ext_zkn) {
        cpu->cfg.ext_zbkb = true;
        cpu->cfg.ext_zbkc = true;
        cpu->cfg.ext_zbkx = true;
        cpu->cfg.ext_zkne = true;
        cpu->cfg.ext_zknd = true;
        cpu->cfg.ext_zknh = true;
    }

    if (cpu->cfg.ext_zks) {
        cpu->cfg.ext_zbkb = true;
        cpu->cfg.ext_zbkc = true;
        cpu->cfg.ext_zbkx = true;
        cpu->cfg.ext_zksed = true;
        cpu->cfg.ext_zksh = true;
    }

    /* Accumulate the misa bits for the enabled single-letter extensions. */
    if (cpu->cfg.ext_i) {
        ext |= RVI;
    }
    if (cpu->cfg.ext_e) {
        ext |= RVE;
    }
    if (cpu->cfg.ext_m) {
        ext |= RVM;
    }
    if (cpu->cfg.ext_a) {
        ext |= RVA;
    }
    if (cpu->cfg.ext_f) {
        ext |= RVF;
    }
    if (cpu->cfg.ext_d) {
        ext |= RVD;
    }
    if (cpu->cfg.ext_c) {
        ext |= RVC;
    }
    if (cpu->cfg.ext_s) {
        ext |= RVS;
    }
    if (cpu->cfg.ext_u) {
        ext |= RVU;
    }
    if (cpu->cfg.ext_h) {
        ext |= RVH;
    }
    if (cpu->cfg.ext_v) {
        int vext_version = VEXT_VERSION_1_00_0;
        ext |= RVV;
        /* VLEN/ELEN must be powers of two within the supported ranges. */
        if (!is_power_of_2(cpu->cfg.vlen)) {
            error_setg(errp,
                       "Vector extension VLEN must be power of 2");
            return;
        }
        if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) {
            error_setg(errp,
                       "Vector extension implementation only supports VLEN "
                       "in the range [128, %d]", RV_VLEN_MAX);
            return;
        }
        if (!is_power_of_2(cpu->cfg.elen)) {
            error_setg(errp,
                       "Vector extension ELEN must be power of 2");
            return;
        }
        if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) {
            error_setg(errp,
                       "Vector extension implementation only supports ELEN "
                       "in the range [8, 64]");
            return;
        }
        if (cpu->cfg.vext_spec) {
            if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) {
                vext_version = VEXT_VERSION_1_00_0;
            } else {
                error_setg(errp,
                           "Unsupported vector spec version '%s'",
                           cpu->cfg.vext_spec);
                return;
            }
        } else {
            qemu_log("vector version is not specified, "
                     "use the default value v1.0\n");
        }
        set_vext_version(env, vext_version);
    }
    if (cpu->cfg.ext_j) {
        ext |= RVJ;
    }

    set_misa(env, env->misa_mxl, ext);
}
1041
1042 #ifndef CONFIG_USER_ONLY
/*
 * Finalize cpu->cfg.satp_mode.map from user-supplied properties
 * (recorded in .init) and the hardware capabilities (.supported):
 * fill in a default if nothing was set, reject combinations the CPU
 * or the spec does not allow, then expand the map to every valid
 * mode at or below the chosen maximum.
 */
static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
{
    bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
    uint8_t satp_mode_map_max;
    uint8_t satp_mode_supported_max =
                        satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (cpu->cfg.satp_mode.map == 0) {
        if (cpu->cfg.satp_mode.init == 0) {
            /* If unset by the user, we fallback to the default satp mode. */
            set_satp_mode_default_map(cpu);
        } else {
            /*
             * Find the lowest level that was disabled and then enable the
             * first valid level below which can be found in
             * valid_vm_1_10_32/64.
             */
            for (int i = 1; i < 16; ++i) {
                if ((cpu->cfg.satp_mode.init & (1 << i)) &&
                    (cpu->cfg.satp_mode.supported & (1 << i))) {
                    for (int j = i - 1; j >= 0; --j) {
                        if (cpu->cfg.satp_mode.supported & (1 << j)) {
                            cpu->cfg.satp_mode.map |= (1 << j);
                            break;
                        }
                    }
                    break;
                }
            }
        }
    }

    satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);

    /* Make sure the user asked for a supported configuration (HW and qemu) */
    if (satp_mode_map_max > satp_mode_supported_max) {
        error_setg(errp, "satp_mode %s is higher than hw max capability %s",
                   satp_mode_str(satp_mode_map_max, rv32),
                   satp_mode_str(satp_mode_supported_max, rv32));
        return;
    }

    /*
     * Make sure the user did not ask for an invalid configuration as per
     * the specification.
     */
    if (!rv32) {
        for (int i = satp_mode_map_max - 1; i >= 0; --i) {
            if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
                (cpu->cfg.satp_mode.init & (1 << i)) &&
                (cpu->cfg.satp_mode.supported & (1 << i))) {
                error_setg(errp, "cannot disable %s satp mode if %s "
                           "is enabled", satp_mode_str(i, false),
                           satp_mode_str(satp_mode_map_max, false));
                return;
            }
        }
    }

    /* Finally expand the map so that all valid modes are set */
    for (int i = satp_mode_map_max - 1; i >= 0; --i) {
        if (cpu->cfg.satp_mode.supported & (1 << i)) {
            cpu->cfg.satp_mode.map |= (1 << i);
        }
    }
}
1109 #endif
1110
1111 static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1112 {
1113 #ifndef CONFIG_USER_ONLY
1114 Error *local_err = NULL;
1115
1116 riscv_cpu_satp_mode_finalize(cpu, &local_err);
1117 if (local_err != NULL) {
1118 error_propagate(errp, local_err);
1119 return;
1120 }
1121 #endif
1122 }
1123
/*
 * DeviceClass::realize for RISC-V CPUs.
 *
 * Order matters here: the privilege spec version must be settled before
 * extensions are force-disabled against it, extension validation must run
 * before feature finalization, and the vCPU is only created once the
 * configuration is fully consistent. Any error is reported via @errp and
 * aborts realization.
 */
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    CPURISCVState *env = &cpu->env;
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    CPUClass *cc = CPU_CLASS(mcc);
    int i, priv_version = -1;
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    /* Parse the user-provided "priv_spec" string, if any. */
    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }
    }

    /* priv_version stays -1 (no override) when "priv_spec" was not set. */
    if (priv_version >= PRIV_VERSION_1_10_0) {
        set_priv_version(env, priv_version);
    }

    /* Force disable extensions if priv spec version does not match */
    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
            (env->priv_ver < isa_edata_arr[i].min_version)) {
            isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        isa_edata_arr[i].name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        isa_edata_arr[i].name);
#endif
        }
    }

    if (cpu->cfg.epmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: EPMP requires PMP support");
        return;
    }


#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }
#endif /* !CONFIG_USER_ONLY */

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
    /* At realize time the effective MXL must equal the maximum MXL. */
    assert(env->misa_mxl_max == env->misa_mxl);

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    /* The PMU overflow timer is only needed with Sscofpmf. */
    if (cpu->cfg.pmu_num) {
        if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}
1236
1237 #ifndef CONFIG_USER_ONLY
1238 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1239 void *opaque, Error **errp)
1240 {
1241 RISCVSATPMap *satp_map = opaque;
1242 uint8_t satp = satp_mode_from_str(name);
1243 bool value;
1244
1245 value = satp_map->map & (1 << satp);
1246
1247 visit_type_bool(v, name, &value, errp);
1248 }
1249
1250 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1251 void *opaque, Error **errp)
1252 {
1253 RISCVSATPMap *satp_map = opaque;
1254 uint8_t satp = satp_mode_from_str(name);
1255 bool value;
1256
1257 if (!visit_type_bool(v, name, &value, errp)) {
1258 return;
1259 }
1260
1261 satp_map->map = deposit32(satp_map->map, satp, 1, value);
1262 satp_map->init |= 1 << satp;
1263 }
1264
1265 static void riscv_add_satp_mode_properties(Object *obj)
1266 {
1267 RISCVCPU *cpu = RISCV_CPU(obj);
1268
1269 if (cpu->env.misa_mxl == MXL_RV32) {
1270 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1271 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1272 } else {
1273 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1274 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1275 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1276 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1277 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1278 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1279 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1280 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1281 }
1282 }
1283
/*
 * GPIO input handler: route an incoming interrupt line change to either
 * KVM or the TCG mip/hgeip state.
 *
 * @irq < IRQ_LOCAL_MAX are the standard local interrupts; S-mode external
 * interrupts track the external level separately so software-injected
 * SEIP is not lost. Lines in [IRQ_LOCAL_MAX, IRQ_LOCAL_MAX +
 * IRQ_LOCAL_GUEST_MAX) are guest external interrupts and require RVH.
 */
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(cpu, 1 << irq, BOOL_TO_MASK(level));
            }
             break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                /*
                 * Keep the external level separate so mip.SEIP reflects
                 * the OR of the hardware line and software injection.
                 */
                env->external_seip = level;
                riscv_cpu_update_mip(cpu, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(cpu, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
1345 #endif /* CONFIG_USER_ONLY */
1346
1347 static void riscv_cpu_init(Object *obj)
1348 {
1349 RISCVCPU *cpu = RISCV_CPU(obj);
1350
1351 cpu->cfg.ext_ifencei = true;
1352 cpu->cfg.ext_icsr = true;
1353 cpu->cfg.mmu = true;
1354 cpu->cfg.pmp = true;
1355
1356 cpu_set_cpustate_pointers(cpu);
1357
1358 #ifndef CONFIG_USER_ONLY
1359 qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
1360 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1361 #endif /* CONFIG_USER_ONLY */
1362 }
1363
/*
 * Per-extension user-configurable properties, registered only when the
 * CPU model does not fix misa_ext itself (see register_cpu_props()).
 */
static Property riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    DEFINE_PROP_BOOL("i", RISCVCPU, cfg.ext_i, true),
    DEFINE_PROP_BOOL("e", RISCVCPU, cfg.ext_e, false),
    DEFINE_PROP_BOOL("g", RISCVCPU, cfg.ext_g, false),
    DEFINE_PROP_BOOL("m", RISCVCPU, cfg.ext_m, true),
    DEFINE_PROP_BOOL("a", RISCVCPU, cfg.ext_a, true),
    DEFINE_PROP_BOOL("f", RISCVCPU, cfg.ext_f, true),
    DEFINE_PROP_BOOL("d", RISCVCPU, cfg.ext_d, true),
    DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true),
    DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true),
    DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
    DEFINE_PROP_BOOL("v", RISCVCPU, cfg.ext_v, false),
    DEFINE_PROP_BOOL("h", RISCVCPU, cfg.ext_h, true),
    DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
    DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
    DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
    DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
    DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
    DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
    DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
    DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
    DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
    DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
    DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
    DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),

    /* Spec version selection and vector geometry. */
    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),

    DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
    DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
    DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),

    /* Bit-manipulation and scalar crypto extensions. */
    DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
    DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
    DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
    DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
    DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
    DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
    DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
    DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
    DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
    DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
    DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
    DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
    DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
    DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
    DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
    DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
    DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),

    /* Float/double-in-integer-register variants. */
    DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
    DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
    DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
    DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),

    /* Cache-block management/zeroing and their block sizes. */
    DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true),
    DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
    DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true),
    DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),

    DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),

    /* Vendor-specific custom extensions */
    DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
    DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
    DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
    DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
    DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
    DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
    DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false),
    DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
    DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
    DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
    DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
    DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),

    /* These are experimental so mark with 'x-' */
    DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),
    DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
    /* ePMP 0.9.3 */
    DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
    DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
    DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),

    DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
    DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),

    DEFINE_PROP_END_OF_LIST(),
};
1461
1462 /*
1463 * Register CPU props based on env.misa_ext. If a non-zero
1464 * value was set, register only the required cpu->cfg.ext_*
1465 * properties and leave. env.misa_ext = 0 means that we want
1466 * all the default properties to be registered.
1467 */
1468 static void register_cpu_props(Object *obj)
1469 {
1470 RISCVCPU *cpu = RISCV_CPU(obj);
1471 uint32_t misa_ext = cpu->env.misa_ext;
1472 Property *prop;
1473 DeviceState *dev = DEVICE(obj);
1474
1475 /*
1476 * If misa_ext is not zero, set cfg properties now to
1477 * allow them to be read during riscv_cpu_realize()
1478 * later on.
1479 */
1480 if (cpu->env.misa_ext != 0) {
1481 cpu->cfg.ext_i = misa_ext & RVI;
1482 cpu->cfg.ext_e = misa_ext & RVE;
1483 cpu->cfg.ext_m = misa_ext & RVM;
1484 cpu->cfg.ext_a = misa_ext & RVA;
1485 cpu->cfg.ext_f = misa_ext & RVF;
1486 cpu->cfg.ext_d = misa_ext & RVD;
1487 cpu->cfg.ext_v = misa_ext & RVV;
1488 cpu->cfg.ext_c = misa_ext & RVC;
1489 cpu->cfg.ext_s = misa_ext & RVS;
1490 cpu->cfg.ext_u = misa_ext & RVU;
1491 cpu->cfg.ext_h = misa_ext & RVH;
1492 cpu->cfg.ext_j = misa_ext & RVJ;
1493
1494 /*
1495 * We don't want to set the default riscv_cpu_extensions
1496 * in this case.
1497 */
1498 return;
1499 }
1500
1501 for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
1502 qdev_property_add_static(dev, prop);
1503 }
1504
1505 #ifndef CONFIG_USER_ONLY
1506 riscv_add_satp_mode_properties(obj);
1507 #endif
1508 }
1509
/* Properties common to all RISC-V CPU models, regardless of misa_ext. */
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    /* Machine identification CSRs; marchid/mimpid default to the QEMU version. */
    DEFINE_PROP_UINT32("mvendorid", RISCVCPU, cfg.mvendorid, 0),
    DEFINE_PROP_UINT64("marchid", RISCVCPU, cfg.marchid, RISCV_CPU_MARCHID),
    DEFINE_PROP_UINT64("mimpid", RISCVCPU, cfg.mimpid, RISCV_CPU_MIMPID),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    /* Vector tail/mask agnostic behavior: set all-1s instead of undisturbed. */
    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};
1533
1534 static gchar *riscv_gdb_arch_name(CPUState *cs)
1535 {
1536 RISCVCPU *cpu = RISCV_CPU(cs);
1537 CPURISCVState *env = &cpu->env;
1538
1539 switch (riscv_cpu_mxl(env)) {
1540 case MXL_RV32:
1541 return g_strdup("riscv:rv32");
1542 case MXL_RV64:
1543 case MXL_RV128:
1544 return g_strdup("riscv:rv64");
1545 default:
1546 g_assert_not_reached();
1547 }
1548 }
1549
1550 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1551 {
1552 RISCVCPU *cpu = RISCV_CPU(cs);
1553
1554 if (strcmp(xmlname, "riscv-csr.xml") == 0) {
1555 return cpu->dyn_csr_xml;
1556 } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
1557 return cpu->dyn_vreg_xml;
1558 }
1559
1560 return NULL;
1561 }
1562
1563 #ifndef CONFIG_USER_ONLY
1564 static int64_t riscv_get_arch_id(CPUState *cs)
1565 {
1566 RISCVCPU *cpu = RISCV_CPU(cs);
1567
1568 return cpu->env.mhartid;
1569 }
1570
1571 #include "hw/core/sysemu-cpu-ops.h"
1572
/* System-emulation hooks: debug page walks, ELF core notes, migration. */
static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
1579 #endif
1580
1581 #include "hw/core/tcg-cpu-ops.h"
1582
/* TCG execution hooks; the system-mode-only entries handle faults and debug. */
static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};
1599
/*
 * Class init for the abstract RISC-V CPU type: chain realize/reset into
 * the parent class and install the CPUClass hooks shared by all models.
 */
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    /* 32 GPRs plus pc. */
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    device_class_set_props(dc, riscv_cpu_properties);
}
1633
1634 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, int max_str_len)
1635 {
1636 char *old = *isa_str;
1637 char *new = *isa_str;
1638 int i;
1639
1640 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
1641 if (isa_edata_arr[i].multi_letter &&
1642 isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
1643 new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
1644 g_free(old);
1645 old = new;
1646 }
1647 }
1648
1649 *isa_str = new;
1650 }
1651
1652 char *riscv_isa_string(RISCVCPU *cpu)
1653 {
1654 int i;
1655 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
1656 char *isa_str = g_new(char, maxlen);
1657 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
1658 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
1659 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
1660 *p++ = qemu_tolower(riscv_single_letter_exts[i]);
1661 }
1662 }
1663 *p = '\0';
1664 if (!cpu->cfg.short_isa_string) {
1665 riscv_isa_string_ext(cpu, &isa_str, maxlen);
1666 }
1667 return isa_str;
1668 }
1669
1670 static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
1671 {
1672 ObjectClass *class_a = (ObjectClass *)a;
1673 ObjectClass *class_b = (ObjectClass *)b;
1674 const char *name_a, *name_b;
1675
1676 name_a = object_class_get_name(class_a);
1677 name_b = object_class_get_name(class_b);
1678 return strcmp(name_a, name_b);
1679 }
1680
1681 static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
1682 {
1683 const char *typename = object_class_get_name(OBJECT_CLASS(data));
1684 int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);
1685
1686 qemu_printf("%.*s\n", len, typename);
1687 }
1688
1689 void riscv_cpu_list(void)
1690 {
1691 GSList *list;
1692
1693 list = object_class_get_list(TYPE_RISCV_CPU, false);
1694 list = g_slist_sort(list, riscv_cpu_list_compare);
1695 g_slist_foreach(list, riscv_cpu_list_entry, NULL);
1696 g_slist_free(list);
1697 }
1698
/* Shorthand for a concrete CPU model deriving from TYPE_RISCV_CPU. */
#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

/*
 * The abstract base type plus the concrete CPU models available for the
 * current build target (RV32 or RV64; "host" only with KVM).
 */
static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    DEFINE_CPU(TYPE_RISCV_CPU_ANY,              riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST,             riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_CPU(TYPE_RISCV_CPU_BASE32,           rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX,             rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31,       rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34,       rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34,       rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_CPU(TYPE_RISCV_CPU_BASE64,           rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51,       rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54,       rv64_sifive_u_cpu_init),
    /* Shakti C reuses the SiFive U54 init function. */
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C,         rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906,       rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_BASE128,          rv128_base_cpu_init),
#endif
};
1736
1737 DEFINE_TYPES(riscv_cpu_type_infos)