target/riscv/cpu.c
1 /*
2 * QEMU RISC-V CPU
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "pmu.h"
26 #include "internals.h"
27 #include "time_helper.h"
28 #include "exec/exec-all.h"
29 #include "qapi/error.h"
30 #include "qemu/error-report.h"
31 #include "hw/qdev-properties.h"
32 #include "migration/vmstate.h"
33 #include "fpu/softfloat-helpers.h"
34 #include "sysemu/kvm.h"
35 #include "kvm_riscv.h"
36
37 /* RISC-V CPU definitions */
38
39 #define RISCV_CPU_MARCHID ((QEMU_VERSION_MAJOR << 16) | \
40 (QEMU_VERSION_MINOR << 8) | \
41 (QEMU_VERSION_MICRO))
42 #define RISCV_CPU_MIMPID RISCV_CPU_MARCHID
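/*
 * For illustration (hypothetical version): a QEMU 7.1.0 build would encode
 *   marchid = (7 << 16) | (1 << 8) | 0 = 0x70100
 * and mimpid reuses the same value, so the reported CSRs track the QEMU
 * version that was built.
 */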
43
44 static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
45
46 struct isa_ext_data {
47 const char *name;
48 bool multi_letter;
49 int min_version;
50 int ext_enable_offset;
51 };
52
53 #define ISA_EXT_DATA_ENTRY(_name, _m_letter, _min_ver, _prop) \
54 {#_name, _m_letter, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}
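/*
 * For illustration (expansion sketch, not part of the upstream file): the
 * Zicsr entry below expands roughly to
 *   { "zicsr", true, PRIV_VERSION_1_10_0,
 *     offsetof(struct RISCVCPUConfig, ext_icsr) }
 * i.e. the macro stringizes the name and records where the corresponding
 * bool flag lives inside RISCVCPUConfig.
 */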
55
56 /**
57 * Here are the ordering rules for extension naming defined by the RISC-V
58 * specification:
59 * 1. All extensions should be separated from other multi-letter extensions
60 * by an underscore.
61 * 2. The first letter following the 'Z' conventionally indicates the most
62 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
63 * If multiple 'Z' extensions are named, they should be ordered first
64 * by category, then alphabetically within a category.
65 * 3. Standard supervisor-level extensions (starting with 'S') should be
66 * listed after standard unprivileged extensions. If multiple
67 * supervisor-level extensions are listed, they should be ordered
68 * alphabetically.
69 * 4. Non-standard extensions (starting with 'X') must be listed after all
70 * standard extensions. They must be separated from other multi-letter
71 * extensions by an underscore.
72 */
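/*
 * For illustration, a hypothetical ISA string such as
 *   rv64imafdc_zicsr_zifencei_zba_zbb_sstc_svinval_xventanacondops
 * satisfies these rules: single-letter extensions first, the 'Z'
 * extensions grouped by category ('Zi' before 'Zb') and alphabetical
 * within a category, the supervisor-level 'S' extensions next in
 * alphabetical order, and the non-standard 'X' extension last, all
 * separated by underscores.
 */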
73 static const struct isa_ext_data isa_edata_arr[] = {
74 ISA_EXT_DATA_ENTRY(h, false, PRIV_VERSION_1_12_0, ext_h),
75 ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_12_0, ext_v),
76 ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr),
77 ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei),
78 ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause),
79 ISA_EXT_DATA_ENTRY(zfh, true, PRIV_VERSION_1_12_0, ext_zfh),
80 ISA_EXT_DATA_ENTRY(zfhmin, true, PRIV_VERSION_1_12_0, ext_zfhmin),
81 ISA_EXT_DATA_ENTRY(zfinx, true, PRIV_VERSION_1_12_0, ext_zfinx),
82 ISA_EXT_DATA_ENTRY(zdinx, true, PRIV_VERSION_1_12_0, ext_zdinx),
83 ISA_EXT_DATA_ENTRY(zba, true, PRIV_VERSION_1_12_0, ext_zba),
84 ISA_EXT_DATA_ENTRY(zbb, true, PRIV_VERSION_1_12_0, ext_zbb),
85 ISA_EXT_DATA_ENTRY(zbc, true, PRIV_VERSION_1_12_0, ext_zbc),
86 ISA_EXT_DATA_ENTRY(zbkb, true, PRIV_VERSION_1_12_0, ext_zbkb),
87 ISA_EXT_DATA_ENTRY(zbkc, true, PRIV_VERSION_1_12_0, ext_zbkc),
88 ISA_EXT_DATA_ENTRY(zbkx, true, PRIV_VERSION_1_12_0, ext_zbkx),
89 ISA_EXT_DATA_ENTRY(zbs, true, PRIV_VERSION_1_12_0, ext_zbs),
90 ISA_EXT_DATA_ENTRY(zk, true, PRIV_VERSION_1_12_0, ext_zk),
91 ISA_EXT_DATA_ENTRY(zkn, true, PRIV_VERSION_1_12_0, ext_zkn),
92 ISA_EXT_DATA_ENTRY(zknd, true, PRIV_VERSION_1_12_0, ext_zknd),
93 ISA_EXT_DATA_ENTRY(zkne, true, PRIV_VERSION_1_12_0, ext_zkne),
94 ISA_EXT_DATA_ENTRY(zknh, true, PRIV_VERSION_1_12_0, ext_zknh),
95 ISA_EXT_DATA_ENTRY(zkr, true, PRIV_VERSION_1_12_0, ext_zkr),
96 ISA_EXT_DATA_ENTRY(zks, true, PRIV_VERSION_1_12_0, ext_zks),
97 ISA_EXT_DATA_ENTRY(zksed, true, PRIV_VERSION_1_12_0, ext_zksed),
98 ISA_EXT_DATA_ENTRY(zksh, true, PRIV_VERSION_1_12_0, ext_zksh),
99 ISA_EXT_DATA_ENTRY(zkt, true, PRIV_VERSION_1_12_0, ext_zkt),
100 ISA_EXT_DATA_ENTRY(zve32f, true, PRIV_VERSION_1_12_0, ext_zve32f),
101 ISA_EXT_DATA_ENTRY(zve64f, true, PRIV_VERSION_1_12_0, ext_zve64f),
102 ISA_EXT_DATA_ENTRY(zhinx, true, PRIV_VERSION_1_12_0, ext_zhinx),
103 ISA_EXT_DATA_ENTRY(zhinxmin, true, PRIV_VERSION_1_12_0, ext_zhinxmin),
104 ISA_EXT_DATA_ENTRY(smaia, true, PRIV_VERSION_1_12_0, ext_smaia),
105 ISA_EXT_DATA_ENTRY(ssaia, true, PRIV_VERSION_1_12_0, ext_ssaia),
106 ISA_EXT_DATA_ENTRY(sscofpmf, true, PRIV_VERSION_1_12_0, ext_sscofpmf),
107 ISA_EXT_DATA_ENTRY(sstc, true, PRIV_VERSION_1_12_0, ext_sstc),
108 ISA_EXT_DATA_ENTRY(svinval, true, PRIV_VERSION_1_12_0, ext_svinval),
109 ISA_EXT_DATA_ENTRY(svnapot, true, PRIV_VERSION_1_12_0, ext_svnapot),
110 ISA_EXT_DATA_ENTRY(svpbmt, true, PRIV_VERSION_1_12_0, ext_svpbmt),
111 ISA_EXT_DATA_ENTRY(xventanacondops, true, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
112 };
113
114 static bool isa_ext_is_enabled(RISCVCPU *cpu,
115 const struct isa_ext_data *edata)
116 {
117 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
118
119 return *ext_enabled;
120 }
121
122 static void isa_ext_update_enabled(RISCVCPU *cpu,
123 const struct isa_ext_data *edata, bool en)
124 {
125 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
126
127 *ext_enabled = en;
128 }
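/*
 * Note: both helpers above find the flag by adding the offsetof()-based
 * ext_enable_offset from isa_edata_arr to &cpu->cfg. For the Zicsr entry
 * this is equivalent in effect to touching cpu->cfg.ext_icsr directly;
 * the table just lets one loop handle every extension generically.
 */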
129
130 const char * const riscv_int_regnames[] = {
131 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
132 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
133 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
134 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
135 "x28/t3", "x29/t4", "x30/t5", "x31/t6"
136 };
137
138 const char * const riscv_int_regnamesh[] = {
139 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
140 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
141 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
142 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
143 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
144 "x30h/t5h", "x31h/t6h"
145 };
146
147 const char * const riscv_fpr_regnames[] = {
148 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
149 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
150 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
151 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
152 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
153 "f30/ft10", "f31/ft11"
154 };
155
156 static const char * const riscv_excp_names[] = {
157 "misaligned_fetch",
158 "fault_fetch",
159 "illegal_instruction",
160 "breakpoint",
161 "misaligned_load",
162 "fault_load",
163 "misaligned_store",
164 "fault_store",
165 "user_ecall",
166 "supervisor_ecall",
167 "hypervisor_ecall",
168 "machine_ecall",
169 "exec_page_fault",
170 "load_page_fault",
171 "reserved",
172 "store_page_fault",
173 "reserved",
174 "reserved",
175 "reserved",
176 "reserved",
177 "guest_exec_page_fault",
178 "guest_load_page_fault",
179 "reserved",
180 "guest_store_page_fault",
181 };
182
183 static const char * const riscv_intr_names[] = {
184 "u_software",
185 "s_software",
186 "vs_software",
187 "m_software",
188 "u_timer",
189 "s_timer",
190 "vs_timer",
191 "m_timer",
192 "u_external",
193 "s_external",
194 "vs_external",
195 "m_external",
196 "reserved",
197 "reserved",
198 "reserved",
199 "reserved"
200 };
201
202 static void register_cpu_props(DeviceState *dev);
203
204 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
205 {
206 if (async) {
207 return (cause < ARRAY_SIZE(riscv_intr_names)) ?
208 riscv_intr_names[cause] : "(unknown)";
209 } else {
210 return (cause < ARRAY_SIZE(riscv_excp_names)) ?
211 riscv_excp_names[cause] : "(unknown)";
212 }
213 }
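/*
 * For illustration: riscv_cpu_get_trap_name(2, false) yields
 * "illegal_instruction" from riscv_excp_names, riscv_cpu_get_trap_name(5,
 * true) yields "s_timer" from riscv_intr_names, and any out-of-range
 * cause yields "(unknown)".
 */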
214
215 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
216 {
217 env->misa_mxl_max = env->misa_mxl = mxl;
218 env->misa_ext_mask = env->misa_ext = ext;
219 }
220
221 static void set_priv_version(CPURISCVState *env, int priv_ver)
222 {
223 env->priv_ver = priv_ver;
224 }
225
226 static void set_vext_version(CPURISCVState *env, int vext_ver)
227 {
228 env->vext_ver = vext_ver;
229 }
230
231 static void riscv_any_cpu_init(Object *obj)
232 {
233 CPURISCVState *env = &RISCV_CPU(obj)->env;
234 #if defined(TARGET_RISCV32)
235 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
236 #elif defined(TARGET_RISCV64)
237 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
238 #endif
239 set_priv_version(env, PRIV_VERSION_1_12_0);
240 register_cpu_props(DEVICE(obj));
241 }
242
243 #if defined(TARGET_RISCV64)
244 static void rv64_base_cpu_init(Object *obj)
245 {
246 CPURISCVState *env = &RISCV_CPU(obj)->env;
247 /* We set this in the realize function */
248 set_misa(env, MXL_RV64, 0);
249 register_cpu_props(DEVICE(obj));
250 /* Set latest version of privileged specification */
251 set_priv_version(env, PRIV_VERSION_1_12_0);
252 }
253
254 static void rv64_sifive_u_cpu_init(Object *obj)
255 {
256 CPURISCVState *env = &RISCV_CPU(obj)->env;
257 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
258 set_priv_version(env, PRIV_VERSION_1_10_0);
259 }
260
261 static void rv64_sifive_e_cpu_init(Object *obj)
262 {
263 CPURISCVState *env = &RISCV_CPU(obj)->env;
264 RISCVCPU *cpu = RISCV_CPU(obj);
265
266 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
267 set_priv_version(env, PRIV_VERSION_1_10_0);
268 cpu->cfg.mmu = false;
269 }
270
271 static void rv128_base_cpu_init(Object *obj)
272 {
273 if (qemu_tcg_mttcg_enabled()) {
274 /* Missing 128-bit aligned atomics */
275 error_report("128-bit RISC-V currently does not work with Multi "
276 "Threaded TCG. Please use: -accel tcg,thread=single");
277 exit(EXIT_FAILURE);
278 }
279 CPURISCVState *env = &RISCV_CPU(obj)->env;
280 /* We set this in the realize function */
281 set_misa(env, MXL_RV128, 0);
282 register_cpu_props(DEVICE(obj));
283 /* Set latest version of privileged specification */
284 set_priv_version(env, PRIV_VERSION_1_12_0);
285 }
286 #else
287 static void rv32_base_cpu_init(Object *obj)
288 {
289 CPURISCVState *env = &RISCV_CPU(obj)->env;
290 /* We set this in the realize function */
291 set_misa(env, MXL_RV32, 0);
292 register_cpu_props(DEVICE(obj));
293 /* Set latest version of privileged specification */
294 set_priv_version(env, PRIV_VERSION_1_12_0);
295 }
296
297 static void rv32_sifive_u_cpu_init(Object *obj)
298 {
299 CPURISCVState *env = &RISCV_CPU(obj)->env;
300 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
301 set_priv_version(env, PRIV_VERSION_1_10_0);
302 }
303
304 static void rv32_sifive_e_cpu_init(Object *obj)
305 {
306 CPURISCVState *env = &RISCV_CPU(obj)->env;
307 RISCVCPU *cpu = RISCV_CPU(obj);
308
309 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
310 set_priv_version(env, PRIV_VERSION_1_10_0);
311 cpu->cfg.mmu = false;
312 }
313
314 static void rv32_ibex_cpu_init(Object *obj)
315 {
316 CPURISCVState *env = &RISCV_CPU(obj)->env;
317 RISCVCPU *cpu = RISCV_CPU(obj);
318
319 set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
320 set_priv_version(env, PRIV_VERSION_1_11_0);
321 cpu->cfg.mmu = false;
322 cpu->cfg.epmp = true;
323 }
324
325 static void rv32_imafcu_nommu_cpu_init(Object *obj)
326 {
327 CPURISCVState *env = &RISCV_CPU(obj)->env;
328 RISCVCPU *cpu = RISCV_CPU(obj);
329
330 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
331 set_priv_version(env, PRIV_VERSION_1_10_0);
332 cpu->cfg.mmu = false;
333 }
334 #endif
335
336 #if defined(CONFIG_KVM)
337 static void riscv_host_cpu_init(Object *obj)
338 {
339 CPURISCVState *env = &RISCV_CPU(obj)->env;
340 #if defined(TARGET_RISCV32)
341 set_misa(env, MXL_RV32, 0);
342 #elif defined(TARGET_RISCV64)
343 set_misa(env, MXL_RV64, 0);
344 #endif
345 register_cpu_props(DEVICE(obj));
346 }
347 #endif
348
349 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
350 {
351 ObjectClass *oc;
352 char *typename;
353 char **cpuname;
354
355 cpuname = g_strsplit(cpu_model, ",", 1);
356 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
357 oc = object_class_by_name(typename);
358 g_strfreev(cpuname);
359 g_free(typename);
360 if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
361 object_class_is_abstract(oc)) {
362 return NULL;
363 }
364 return oc;
365 }
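/*
 * For illustration: given a model name of "rv64" this builds the QOM type
 * name "rv64-riscv-cpu" (assuming the usual RISCV_CPU_TYPE_NAME pattern of
 * "<name>-riscv-cpu") and returns its class, or NULL if the type is
 * missing, abstract, or not a RISC-V CPU.
 */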
366
367 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
368 {
369 RISCVCPU *cpu = RISCV_CPU(cs);
370 CPURISCVState *env = &cpu->env;
371 int i;
372
373 #if !defined(CONFIG_USER_ONLY)
374 if (riscv_has_ext(env, RVH)) {
375 qemu_fprintf(f, " %s %d\n", "V = ", riscv_cpu_virt_enabled(env));
376 }
377 #endif
378 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
379 #ifndef CONFIG_USER_ONLY
380 {
381 static const int dump_csrs[] = {
382 CSR_MHARTID,
383 CSR_MSTATUS,
384 CSR_MSTATUSH,
385 CSR_HSTATUS,
386 CSR_VSSTATUS,
387 CSR_MIP,
388 CSR_MIE,
389 CSR_MIDELEG,
390 CSR_HIDELEG,
391 CSR_MEDELEG,
392 CSR_HEDELEG,
393 CSR_MTVEC,
394 CSR_STVEC,
395 CSR_VSTVEC,
396 CSR_MEPC,
397 CSR_SEPC,
398 CSR_VSEPC,
399 CSR_MCAUSE,
400 CSR_SCAUSE,
401 CSR_VSCAUSE,
402 CSR_MTVAL,
403 CSR_STVAL,
404 CSR_HTVAL,
405 CSR_MTVAL2,
406 CSR_MSCRATCH,
407 CSR_SSCRATCH,
408 CSR_SATP,
409 CSR_MMTE,
410 CSR_UPMBASE,
411 CSR_UPMMASK,
412 CSR_SPMBASE,
413 CSR_SPMMASK,
414 CSR_MPMBASE,
415 CSR_MPMMASK,
416 };
417
418 for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
419 int csrno = dump_csrs[i];
420 target_ulong val = 0;
421 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
422
423 /*
424 * Rely on the smode, hmode, etc., predicates within csr.c
425 * to do the filtering of the registers that are present.
426 */
427 if (res == RISCV_EXCP_NONE) {
428 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
429 csr_ops[csrno].name, val);
430 }
431 }
432 }
433 #endif
434
435 for (i = 0; i < 32; i++) {
436 qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
437 riscv_int_regnames[i], env->gpr[i]);
438 if ((i & 3) == 3) {
439 qemu_fprintf(f, "\n");
440 }
441 }
442 if (flags & CPU_DUMP_FPU) {
443 for (i = 0; i < 32; i++) {
444 qemu_fprintf(f, " %-8s %016" PRIx64,
445 riscv_fpr_regnames[i], env->fpr[i]);
446 if ((i & 3) == 3) {
447 qemu_fprintf(f, "\n");
448 }
449 }
450 }
451 }
452
453 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
454 {
455 RISCVCPU *cpu = RISCV_CPU(cs);
456 CPURISCVState *env = &cpu->env;
457
458 if (env->xl == MXL_RV32) {
459 env->pc = (int32_t)value;
460 } else {
461 env->pc = value;
462 }
463 }
464
465 static vaddr riscv_cpu_get_pc(CPUState *cs)
466 {
467 RISCVCPU *cpu = RISCV_CPU(cs);
468 CPURISCVState *env = &cpu->env;
469
470 /* Match cpu_get_tb_cpu_state. */
471 if (env->xl == MXL_RV32) {
472 return env->pc & UINT32_MAX;
473 }
474 return env->pc;
475 }
476
477 static void riscv_cpu_synchronize_from_tb(CPUState *cs,
478 const TranslationBlock *tb)
479 {
480 RISCVCPU *cpu = RISCV_CPU(cs);
481 CPURISCVState *env = &cpu->env;
482 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
483
484 if (xl == MXL_RV32) {
485 env->pc = (int32_t)tb->pc;
486 } else {
487 env->pc = tb->pc;
488 }
489 }
490
491 static bool riscv_cpu_has_work(CPUState *cs)
492 {
493 #ifndef CONFIG_USER_ONLY
494 RISCVCPU *cpu = RISCV_CPU(cs);
495 CPURISCVState *env = &cpu->env;
496 /*
497 * Definition of the WFI instruction requires it to ignore the privilege
498 * mode and delegation registers, but respect individual enables
499 */
500 return riscv_cpu_all_pending(env) != 0;
501 #else
502 return true;
503 #endif
504 }
505
506 void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb,
507 target_ulong *data)
508 {
509 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
510 if (xl == MXL_RV32) {
511 env->pc = (int32_t)data[0];
512 } else {
513 env->pc = data[0];
514 }
515 env->bins = data[1];
516 }
517
518 static void riscv_cpu_reset(DeviceState *dev)
519 {
520 #ifndef CONFIG_USER_ONLY
521 uint8_t iprio;
522 int i, irq, rdzero;
523 #endif
524 CPUState *cs = CPU(dev);
525 RISCVCPU *cpu = RISCV_CPU(cs);
526 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
527 CPURISCVState *env = &cpu->env;
528
529 mcc->parent_reset(dev);
530 #ifndef CONFIG_USER_ONLY
531 env->misa_mxl = env->misa_mxl_max;
532 env->priv = PRV_M;
533 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
534 if (env->misa_mxl > MXL_RV32) {
535 /*
536 * The reset status of SXL/UXL is undefined, but mstatus is WARL
537 * and we must ensure that the value after init is valid for read.
538 */
539 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
540 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
541 if (riscv_has_ext(env, RVH)) {
542 env->vsstatus = set_field(env->vsstatus,
543 MSTATUS64_SXL, env->misa_mxl);
544 env->vsstatus = set_field(env->vsstatus,
545 MSTATUS64_UXL, env->misa_mxl);
546 env->mstatus_hs = set_field(env->mstatus_hs,
547 MSTATUS64_SXL, env->misa_mxl);
548 env->mstatus_hs = set_field(env->mstatus_hs,
549 MSTATUS64_UXL, env->misa_mxl);
550 }
551 }
552 env->mcause = 0;
553 env->miclaim = MIP_SGEIP;
554 env->pc = env->resetvec;
555 env->bins = 0;
556 env->two_stage_lookup = false;
557
558 /* Initialize default priorities of local interrupts. */
559 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
560 iprio = riscv_cpu_default_priority(i);
561 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
562 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
563 env->hviprio[i] = 0;
564 }
565 i = 0;
566 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
567 if (!rdzero) {
568 env->hviprio[irq] = env->miprio[irq];
569 }
570 i++;
571 }
572 /* mmte is supposed to have pm.current hardwired to 1 */
573 env->mmte |= (PM_EXT_INITIAL | MMTE_M_PM_CURRENT);
574 #endif
575 env->xl = riscv_cpu_mxl(env);
576 riscv_cpu_update_mask(env);
577 cs->exception_index = RISCV_EXCP_NONE;
578 env->load_res = -1;
579 set_default_nan_mode(1, &env->fp_status);
580
581 #ifndef CONFIG_USER_ONLY
582 if (riscv_feature(env, RISCV_FEATURE_DEBUG)) {
583 riscv_trigger_init(env);
584 }
585
586 if (kvm_enabled()) {
587 kvm_riscv_reset_vcpu(cpu);
588 }
589 #endif
590 }
591
592 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
593 {
594 RISCVCPU *cpu = RISCV_CPU(s);
595
596 switch (riscv_cpu_mxl(&cpu->env)) {
597 case MXL_RV32:
598 info->print_insn = print_insn_riscv32;
599 break;
600 case MXL_RV64:
601 info->print_insn = print_insn_riscv64;
602 break;
603 case MXL_RV128:
604 info->print_insn = print_insn_riscv128;
605 break;
606 default:
607 g_assert_not_reached();
608 }
609 }
610
611 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
612 {
613 CPUState *cs = CPU(dev);
614 RISCVCPU *cpu = RISCV_CPU(dev);
615 CPURISCVState *env = &cpu->env;
616 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
617 CPUClass *cc = CPU_CLASS(mcc);
618 int i, priv_version = -1;
619 Error *local_err = NULL;
620
621 cpu_exec_realizefn(cs, &local_err);
622 if (local_err != NULL) {
623 error_propagate(errp, local_err);
624 return;
625 }
626
627 if (cpu->cfg.priv_spec) {
628 if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
629 priv_version = PRIV_VERSION_1_12_0;
630 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
631 priv_version = PRIV_VERSION_1_11_0;
632 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
633 priv_version = PRIV_VERSION_1_10_0;
634 } else {
635 error_setg(errp,
636 "Unsupported privilege spec version '%s'",
637 cpu->cfg.priv_spec);
638 return;
639 }
640 }
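/*
 * For illustration (hypothetical command line): passing e.g.
 *   -cpu rv64,priv_spec=v1.10.0
 * selects PRIV_VERSION_1_10_0 here; any other string is rejected with the
 * error above.
 */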
641
642 if (priv_version >= PRIV_VERSION_1_10_0) {
643 set_priv_version(env, priv_version);
644 }
645
646 /* Force-disable extensions whose minimum priv spec version is not met */
647 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
648 if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
649 (env->priv_ver < isa_edata_arr[i].min_version)) {
650 isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
651 #ifndef CONFIG_USER_ONLY
652 warn_report("disabling %s extension for hart 0x%lx because "
653 "privilege spec version does not match",
654 isa_edata_arr[i].name, (unsigned long)env->mhartid);
655 #else
656 warn_report("disabling %s extension because "
657 "privilege spec version does not match",
658 isa_edata_arr[i].name);
659 #endif
660 }
661 }
662
663 if (cpu->cfg.mmu) {
664 riscv_set_feature(env, RISCV_FEATURE_MMU);
665 }
666
667 if (cpu->cfg.pmp) {
668 riscv_set_feature(env, RISCV_FEATURE_PMP);
669
670 /*
671 * Enhanced PMP should only be available
672 * on harts with PMP support
673 */
674 if (cpu->cfg.epmp) {
675 riscv_set_feature(env, RISCV_FEATURE_EPMP);
676 }
677 }
678
679 if (cpu->cfg.debug) {
680 riscv_set_feature(env, RISCV_FEATURE_DEBUG);
681 }
682
683
684 #ifndef CONFIG_USER_ONLY
685 if (cpu->cfg.ext_sstc) {
686 riscv_timer_init(cpu);
687 }
688 #endif /* CONFIG_USER_ONLY */
689
690 /* Validate that MISA_MXL is set properly. */
691 switch (env->misa_mxl_max) {
692 #ifdef TARGET_RISCV64
693 case MXL_RV64:
694 case MXL_RV128:
695 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
696 break;
697 #endif
698 case MXL_RV32:
699 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
700 break;
701 default:
702 g_assert_not_reached();
703 }
704 assert(env->misa_mxl_max == env->misa_mxl);
705
706 /* If MISA_EXT is not set yet, derive it from the extension properties */
707 if (env->misa_ext == 0) {
708 uint32_t ext = 0;
709
710 /* Do some ISA extension error checking */
711 if (cpu->cfg.ext_g && !(cpu->cfg.ext_i && cpu->cfg.ext_m &&
712 cpu->cfg.ext_a && cpu->cfg.ext_f &&
713 cpu->cfg.ext_d &&
714 cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
715 warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
716 cpu->cfg.ext_i = true;
717 cpu->cfg.ext_m = true;
718 cpu->cfg.ext_a = true;
719 cpu->cfg.ext_f = true;
720 cpu->cfg.ext_d = true;
721 cpu->cfg.ext_icsr = true;
722 cpu->cfg.ext_ifencei = true;
723 }
724
725 if (cpu->cfg.ext_i && cpu->cfg.ext_e) {
726 error_setg(errp,
727 "I and E extensions are incompatible");
728 return;
729 }
730
731 if (!cpu->cfg.ext_i && !cpu->cfg.ext_e) {
732 error_setg(errp,
733 "Either I or E extension must be set");
734 return;
735 }
736
737 if (cpu->cfg.ext_s && !cpu->cfg.ext_u) {
738 error_setg(errp,
739 "Setting S extension without U extension is illegal");
740 return;
741 }
742
743 if (cpu->cfg.ext_h && !cpu->cfg.ext_i) {
744 error_setg(errp,
745 "H depends on an I base integer ISA with 32 x registers");
746 return;
747 }
748
749 if (cpu->cfg.ext_h && !cpu->cfg.ext_s) {
750 error_setg(errp, "H extension implicitly requires S-mode");
751 return;
752 }
753
754 if (cpu->cfg.ext_f && !cpu->cfg.ext_icsr) {
755 error_setg(errp, "F extension requires Zicsr");
756 return;
757 }
758
759 if ((cpu->cfg.ext_zfh || cpu->cfg.ext_zfhmin) && !cpu->cfg.ext_f) {
760 error_setg(errp, "Zfh/Zfhmin extensions require F extension");
761 return;
762 }
763
764 if (cpu->cfg.ext_d && !cpu->cfg.ext_f) {
765 error_setg(errp, "D extension requires F extension");
766 return;
767 }
768
769 if (cpu->cfg.ext_v && !cpu->cfg.ext_d) {
770 error_setg(errp, "V extension requires D extension");
771 return;
772 }
773
774 if ((cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) && !cpu->cfg.ext_f) {
775 error_setg(errp, "Zve32f/Zve64f extensions require F extension");
776 return;
777 }
778
779 /* Set the ISA extensions; checks should have happened above */
780 if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinx ||
781 cpu->cfg.ext_zhinxmin) {
782 cpu->cfg.ext_zfinx = true;
783 }
784
785 if (cpu->cfg.ext_zfinx) {
786 if (!cpu->cfg.ext_icsr) {
787 error_setg(errp, "Zfinx extension requires Zicsr");
788 return;
789 }
790 if (cpu->cfg.ext_f) {
791 error_setg(errp,
792 "Zfinx cannot be supported together with F extension");
793 return;
794 }
795 }
796
797 if (cpu->cfg.ext_zk) {
798 cpu->cfg.ext_zkn = true;
799 cpu->cfg.ext_zkr = true;
800 cpu->cfg.ext_zkt = true;
801 }
802
803 if (cpu->cfg.ext_zkn) {
804 cpu->cfg.ext_zbkb = true;
805 cpu->cfg.ext_zbkc = true;
806 cpu->cfg.ext_zbkx = true;
807 cpu->cfg.ext_zkne = true;
808 cpu->cfg.ext_zknd = true;
809 cpu->cfg.ext_zknh = true;
810 }
811
812 if (cpu->cfg.ext_zks) {
813 cpu->cfg.ext_zbkb = true;
814 cpu->cfg.ext_zbkc = true;
815 cpu->cfg.ext_zbkx = true;
816 cpu->cfg.ext_zksed = true;
817 cpu->cfg.ext_zksh = true;
818 }
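/*
 * Note: the three blocks above implement the Zk group shorthands:
 * enabling zk pulls in zkn, zkr and zkt, zkn in turn enables zbkb, zbkc,
 * zbkx, zkne, zknd and zknh, and zks enables the ShangMi counterparts, so
 * asking for zk=true yields the full NIST-suite scalar crypto set.
 */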
819
820 if (cpu->cfg.ext_i) {
821 ext |= RVI;
822 }
823 if (cpu->cfg.ext_e) {
824 ext |= RVE;
825 }
826 if (cpu->cfg.ext_m) {
827 ext |= RVM;
828 }
829 if (cpu->cfg.ext_a) {
830 ext |= RVA;
831 }
832 if (cpu->cfg.ext_f) {
833 ext |= RVF;
834 }
835 if (cpu->cfg.ext_d) {
836 ext |= RVD;
837 }
838 if (cpu->cfg.ext_c) {
839 ext |= RVC;
840 }
841 if (cpu->cfg.ext_s) {
842 ext |= RVS;
843 }
844 if (cpu->cfg.ext_u) {
845 ext |= RVU;
846 }
847 if (cpu->cfg.ext_h) {
848 ext |= RVH;
849 }
850 if (cpu->cfg.ext_v) {
851 int vext_version = VEXT_VERSION_1_00_0;
852 ext |= RVV;
853 if (!is_power_of_2(cpu->cfg.vlen)) {
854 error_setg(errp,
855 "Vector extension VLEN must be power of 2");
856 return;
857 }
858 if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) {
859 error_setg(errp,
860 "Vector extension implementation only supports VLEN "
861 "in the range [128, %d]", RV_VLEN_MAX);
862 return;
863 }
864 if (!is_power_of_2(cpu->cfg.elen)) {
865 error_setg(errp,
866 "Vector extension ELEN must be power of 2");
867 return;
868 }
869 if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) {
870 error_setg(errp,
871 "Vector extension implementation only supports ELEN "
872 "in the range [8, 64]");
873 return;
874 }
875 if (cpu->cfg.vext_spec) {
876 if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) {
877 vext_version = VEXT_VERSION_1_00_0;
878 } else {
879 error_setg(errp,
880 "Unsupported vector spec version '%s'",
881 cpu->cfg.vext_spec);
882 return;
883 }
884 } else {
885 qemu_log("vector version is not specified, "
886 "use the default value v1.0\n");
887 }
888 set_vext_version(env, vext_version);
889 }
890 if (cpu->cfg.ext_j) {
891 ext |= RVJ;
892 }
893
894 set_misa(env, env->misa_mxl, ext);
895 }
896
897 #ifndef CONFIG_USER_ONLY
898 if (cpu->cfg.pmu_num) {
899 if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
900 cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
901 riscv_pmu_timer_cb, cpu);
902 }
903 }
904 #endif
905
906 riscv_cpu_register_gdb_regs_for_features(cs);
907
908 qemu_init_vcpu(cs);
909 cpu_reset(cs);
910
911 mcc->parent_realize(dev, errp);
912 }
913
914 #ifndef CONFIG_USER_ONLY
915 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
916 {
917 RISCVCPU *cpu = RISCV_CPU(opaque);
918 CPURISCVState *env = &cpu->env;
919
920 if (irq < IRQ_LOCAL_MAX) {
921 switch (irq) {
922 case IRQ_U_SOFT:
923 case IRQ_S_SOFT:
924 case IRQ_VS_SOFT:
925 case IRQ_M_SOFT:
926 case IRQ_U_TIMER:
927 case IRQ_S_TIMER:
928 case IRQ_VS_TIMER:
929 case IRQ_M_TIMER:
930 case IRQ_U_EXT:
931 case IRQ_VS_EXT:
932 case IRQ_M_EXT:
933 if (kvm_enabled()) {
934 kvm_riscv_set_irq(cpu, irq, level);
935 } else {
936 riscv_cpu_update_mip(cpu, 1 << irq, BOOL_TO_MASK(level));
937 }
938 break;
939 case IRQ_S_EXT:
940 if (kvm_enabled()) {
941 kvm_riscv_set_irq(cpu, irq, level);
942 } else {
943 env->external_seip = level;
944 riscv_cpu_update_mip(cpu, 1 << irq,
945 BOOL_TO_MASK(level | env->software_seip));
946 }
947 break;
948 default:
949 g_assert_not_reached();
950 }
951 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
952 /* Require H-extension for handling guest local interrupts */
953 if (!riscv_has_ext(env, RVH)) {
954 g_assert_not_reached();
955 }
956
957 /* Compute bit position in HGEIP CSR */
958 irq = irq - IRQ_LOCAL_MAX + 1;
959 if (env->geilen < irq) {
960 g_assert_not_reached();
961 }
962
963 /* Update HGEIP CSR */
964 env->hgeip &= ~((target_ulong)1 << irq);
965 if (level) {
966 env->hgeip |= (target_ulong)1 << irq;
967 }
968
969 /* Update mip.SGEIP bit */
970 riscv_cpu_update_mip(cpu, MIP_SGEIP,
971 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
972 } else {
973 g_assert_not_reached();
974 }
975 }
976 #endif /* CONFIG_USER_ONLY */
977
978 static void riscv_cpu_init(Object *obj)
979 {
980 RISCVCPU *cpu = RISCV_CPU(obj);
981
982 cpu->cfg.ext_ifencei = true;
983 cpu->cfg.ext_icsr = true;
984 cpu->cfg.mmu = true;
985 cpu->cfg.pmp = true;
986
987 cpu_set_cpustate_pointers(cpu);
988
989 #ifndef CONFIG_USER_ONLY
990 qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
991 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
992 #endif /* CONFIG_USER_ONLY */
993 }
994
995 static Property riscv_cpu_extensions[] = {
996 /* Defaults for standard extensions */
997 DEFINE_PROP_BOOL("i", RISCVCPU, cfg.ext_i, true),
998 DEFINE_PROP_BOOL("e", RISCVCPU, cfg.ext_e, false),
999 DEFINE_PROP_BOOL("g", RISCVCPU, cfg.ext_g, false),
1000 DEFINE_PROP_BOOL("m", RISCVCPU, cfg.ext_m, true),
1001 DEFINE_PROP_BOOL("a", RISCVCPU, cfg.ext_a, true),
1002 DEFINE_PROP_BOOL("f", RISCVCPU, cfg.ext_f, true),
1003 DEFINE_PROP_BOOL("d", RISCVCPU, cfg.ext_d, true),
1004 DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true),
1005 DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true),
1006 DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
1007 DEFINE_PROP_BOOL("v", RISCVCPU, cfg.ext_v, false),
1008 DEFINE_PROP_BOOL("h", RISCVCPU, cfg.ext_h, true),
1009 DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
1010 DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
1011 DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
1012 DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
1013 DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
1014 DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
1015 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
1016 DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
1017 DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
1018 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
1019 DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
1020 DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),
1021
1022 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
1023 DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
1024 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
1025 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
1026
1027 DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
1028 DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
1029 DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),
1030
1031 DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
1032 DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
1033 DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
1034 DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
1035 DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
1036 DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
1037 DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
1038 DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
1039 DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
1040 DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
1041 DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
1042 DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
1043 DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
1044 DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
1045 DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
1046 DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
1047 DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),
1048
1049 DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
1050 DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
1051 DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
1052 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),
1053
1054 DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),
1055
1056 /* Vendor-specific custom extensions */
1057 DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
1058
1059 /* These are experimental so mark with 'x-' */
1060 DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
1061 /* ePMP 0.9.3 */
1062 DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
1063 DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
1064 DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),
1065
1066 DEFINE_PROP_END_OF_LIST(),
1067 };
1068
1069 static void register_cpu_props(DeviceState *dev)
1070 {
1071 Property *prop;
1072
1073 for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
1074 qdev_property_add_static(dev, prop);
1075 }
1076 }
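/*
 * For illustration (hypothetical combination): registering these
 * properties on the CPU device means extensions can be toggled from the
 * command line, e.g.
 *   -cpu rv64,v=true,vlen=256,zbkb=true
 * using the property names defined above.
 */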
1077
1078 static Property riscv_cpu_properties[] = {
1079 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
1080
1081 DEFINE_PROP_UINT32("mvendorid", RISCVCPU, cfg.mvendorid, 0),
1082 DEFINE_PROP_UINT64("marchid", RISCVCPU, cfg.marchid, RISCV_CPU_MARCHID),
1083 DEFINE_PROP_UINT64("mimpid", RISCVCPU, cfg.mimpid, RISCV_CPU_MIMPID),
1084
1085 #ifndef CONFIG_USER_ONLY
1086 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
1087 #endif
1088
1089 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
1090
1091 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
1092 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
1093 DEFINE_PROP_END_OF_LIST(),
1094 };
1095
1096 static gchar *riscv_gdb_arch_name(CPUState *cs)
1097 {
1098 RISCVCPU *cpu = RISCV_CPU(cs);
1099 CPURISCVState *env = &cpu->env;
1100
1101 switch (riscv_cpu_mxl(env)) {
1102 case MXL_RV32:
1103 return g_strdup("riscv:rv32");
1104 case MXL_RV64:
1105 case MXL_RV128:
1106 return g_strdup("riscv:rv64");
1107 default:
1108 g_assert_not_reached();
1109 }
1110 }
1111
1112 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1113 {
1114 RISCVCPU *cpu = RISCV_CPU(cs);
1115
1116 if (strcmp(xmlname, "riscv-csr.xml") == 0) {
1117 return cpu->dyn_csr_xml;
1118 } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
1119 return cpu->dyn_vreg_xml;
1120 }
1121
1122 return NULL;
1123 }
1124
1125 #ifndef CONFIG_USER_ONLY
1126 #include "hw/core/sysemu-cpu-ops.h"
1127
1128 static const struct SysemuCPUOps riscv_sysemu_ops = {
1129 .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
1130 .write_elf64_note = riscv_cpu_write_elf64_note,
1131 .write_elf32_note = riscv_cpu_write_elf32_note,
1132 .legacy_vmsd = &vmstate_riscv_cpu,
1133 };
1134 #endif
1135
1136 #include "hw/core/tcg-cpu-ops.h"
1137
1138 static const struct TCGCPUOps riscv_tcg_ops = {
1139 .initialize = riscv_translate_init,
1140 .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
1141
1142 #ifndef CONFIG_USER_ONLY
1143 .tlb_fill = riscv_cpu_tlb_fill,
1144 .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
1145 .do_interrupt = riscv_cpu_do_interrupt,
1146 .do_transaction_failed = riscv_cpu_do_transaction_failed,
1147 .do_unaligned_access = riscv_cpu_do_unaligned_access,
1148 .debug_excp_handler = riscv_cpu_debug_excp_handler,
1149 .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
1150 .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
1151 #endif /* !CONFIG_USER_ONLY */
1152 };
1153
1154 static void riscv_cpu_class_init(ObjectClass *c, void *data)
1155 {
1156 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
1157 CPUClass *cc = CPU_CLASS(c);
1158 DeviceClass *dc = DEVICE_CLASS(c);
1159
1160 device_class_set_parent_realize(dc, riscv_cpu_realize,
1161 &mcc->parent_realize);
1162
1163 device_class_set_parent_reset(dc, riscv_cpu_reset, &mcc->parent_reset);
1164
1165 cc->class_by_name = riscv_cpu_class_by_name;
1166 cc->has_work = riscv_cpu_has_work;
1167 cc->dump_state = riscv_cpu_dump_state;
1168 cc->set_pc = riscv_cpu_set_pc;
1169 cc->get_pc = riscv_cpu_get_pc;
1170 cc->gdb_read_register = riscv_cpu_gdb_read_register;
1171 cc->gdb_write_register = riscv_cpu_gdb_write_register;
1172 cc->gdb_num_core_regs = 33;
1173 cc->gdb_stop_before_watchpoint = true;
1174 cc->disas_set_info = riscv_cpu_disas_set_info;
1175 #ifndef CONFIG_USER_ONLY
1176 cc->sysemu_ops = &riscv_sysemu_ops;
1177 #endif
1178 cc->gdb_arch_name = riscv_gdb_arch_name;
1179 cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
1180 cc->tcg_ops = &riscv_tcg_ops;
1181
1182 device_class_set_props(dc, riscv_cpu_properties);
1183 }
1184
1185 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str, int max_str_len)
1186 {
1187 char *old = *isa_str;
1188 char *new = *isa_str;
1189 int i;
1190
1191 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
1192 if (isa_edata_arr[i].multi_letter &&
1193 isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
1194 new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
1195 g_free(old);
1196 old = new;
1197 }
1198 }
1199
1200 *isa_str = new;
1201 }
1202
1203 char *riscv_isa_string(RISCVCPU *cpu)
1204 {
1205 int i;
1206 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
1207 char *isa_str = g_new(char, maxlen);
1208 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
1209 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
1210 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
1211 *p++ = qemu_tolower(riscv_single_letter_exts[i]);
1212 }
1213 }
1214 *p = '\0';
1215 if (!cpu->cfg.short_isa_string) {
1216 riscv_isa_string_ext(cpu, &isa_str, maxlen);
1217 }
1218 return isa_str;
1219 }
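/*
 * For illustration: a configuration with I, M, A, F, D and C in misa and
 * only Zicsr/Zifencei among the multi-letter extensions would produce
 *   "rv64imafdc_zicsr_zifencei"
 * on a 64-bit target, or just "rv64imafdc" when short-isa-string is set.
 */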
1220
1221 static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
1222 {
1223 ObjectClass *class_a = (ObjectClass *)a;
1224 ObjectClass *class_b = (ObjectClass *)b;
1225 const char *name_a, *name_b;
1226
1227 name_a = object_class_get_name(class_a);
1228 name_b = object_class_get_name(class_b);
1229 return strcmp(name_a, name_b);
1230 }
1231
1232 static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
1233 {
1234 const char *typename = object_class_get_name(OBJECT_CLASS(data));
1235 int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);
1236
1237 qemu_printf("%.*s\n", len, typename);
1238 }
1239
1240 void riscv_cpu_list(void)
1241 {
1242 GSList *list;
1243
1244 list = object_class_get_list(TYPE_RISCV_CPU, false);
1245 list = g_slist_sort(list, riscv_cpu_list_compare);
1246 g_slist_foreach(list, riscv_cpu_list_entry, NULL);
1247 g_slist_free(list);
1248 }
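/*
 * For illustration (assuming the usual CPU type-name strings): "-cpu help"
 * on a riscv64 build walks this list and prints the sorted names with the
 * "-riscv-cpu" suffix stripped, e.g. "any", "rv64", "shakti-c",
 * "sifive-e51", "sifive-u54" and "x-rv128".
 */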
1249
1250 #define DEFINE_CPU(type_name, initfn) \
1251 { \
1252 .name = type_name, \
1253 .parent = TYPE_RISCV_CPU, \
1254 .instance_init = initfn \
1255 }
1256
1257 static const TypeInfo riscv_cpu_type_infos[] = {
1258 {
1259 .name = TYPE_RISCV_CPU,
1260 .parent = TYPE_CPU,
1261 .instance_size = sizeof(RISCVCPU),
1262 .instance_align = __alignof__(RISCVCPU),
1263 .instance_init = riscv_cpu_init,
1264 .abstract = true,
1265 .class_size = sizeof(RISCVCPUClass),
1266 .class_init = riscv_cpu_class_init,
1267 },
1268 DEFINE_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
1269 #if defined(CONFIG_KVM)
1270 DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init),
1271 #endif
1272 #if defined(TARGET_RISCV32)
1273 DEFINE_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
1274 DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
1275 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
1276 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
1277 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
1278 #elif defined(TARGET_RISCV64)
1279 DEFINE_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
1280 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
1281 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
1282 DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
1283 DEFINE_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
1284 #endif
1285 };
1286
1287 DEFINE_TYPES(riscv_cpu_type_infos)