/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"

/* Hash that stores user set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

static const char *cpu_priv_ver_to_str(int priv_ver)
{
    switch (priv_ver) {
    case PRIV_VERSION_1_10_0:
        return "v1.10.0";
    case PRIV_VERSION_1_11_0:
        return "v1.11.0";
    case PRIV_VERSION_1_12_0:
        return "v1.12.0";
    }

    g_assert_not_reached();
}

static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}

static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    switch (feat_offset) {
    case CPU_CFG_OFFSET(zic64b):
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
        break;
    case CPU_CFG_OFFSET(svade):
        cpu->cfg.ext_svadu = false;
        break;
    default:
        g_assert_not_reached();
    }
}

static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    if (cpu_cfg_offset_is_named_feat(ext_offset)) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}

static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}
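
/*
 * Illustrative note (assumed command lines, not exhaustive): the
 * auto-update helper above only touches extensions the user did not
 * set explicitly. With something like '-cpu rv64,zce=true', Zca is
 * implied and enabled during validation; with
 * '-cpu rv64,zce=true,zca=false' the explicit user choice is left
 * untouched here, and later dependency checks may then reject the
 * configuration.
 */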

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPUClass *cc = CPU_CLASS(mcc);
    CPURISCVState *env = &cpu->env;

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }

    if (env->misa_mxl_max != env->misa_mxl) {
        error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
        return;
    }
}

static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    int priv_version = -1;

    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }

        env->priv_ver = priv_version;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    if (!is_power_of_2(cfg->vlen)) {
        error_setg(errp, "Vector extension VLEN must be power of 2");
        return;
    }

    if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (!is_power_of_2(cfg->elen)) {
        error_setg(errp, "Vector extension ELEN must be power of 2");
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }

    if (cfg->vext_spec) {
        if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
            env->vext_ver = VEXT_VERSION_1_00_0;
        } else {
            error_setg(errp, "Unsupported vector spec version '%s'",
                       cfg->vext_spec);
            return;
        }
    } else if (env->vext_ver == 0) {
        qemu_log("vector version is not specified, "
                 "use the default value v1.0\n");

        env->vext_ver = VEXT_VERSION_1_00_0;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    cpu->cfg.zic64b = cpu->cfg.cbom_blocksize == 64 &&
                      cpu->cfg.cbop_blocksize == 64 &&
                      cpu->cfg.cboz_blocksize == 64;

    cpu->cfg.svade = !cpu->cfg.ext_svadu;
}

static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}
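
/*
 * Note: RVG is the conventional shorthand for IMAFD_Zicsr_Zifencei,
 * which is why the helper above force-enables those MISA bits plus
 * Zicsr and Zifencei whenever the user did not explicitly disable
 * them.
 */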

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfh) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zfhmin), true);
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* The V vector extension depends on the Zve64d extension */
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64d), true);
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64f), true);
    }

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32f), true);
    }

    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
        error_setg(errp, "Zve64d/V extensions require D extension");
        return;
    }

    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zvfh) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvfhmin), true);
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
        error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    /* Set the ISA extensions, checks should have happened above */
    if (cpu->cfg.ext_zhinx) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zhinxmin), true);
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* zca, zcd and zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }

    if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    /*
     * Shorthand vector crypto extensions
     */
    if (cpu->cfg.ext_zvknc) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    if (cpu->cfg.ext_zvkng) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
    }

    if (cpu->cfg.ext_zvkn) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkned), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvknhb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
    }

    if (cpu->cfg.ext_zvksc) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    if (cpu->cfg.ext_zvksg) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
    }

    if (cpu->cfg.ext_zvks) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksed), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksh), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
    }

    if (cpu->cfg.ext_zvkt) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    /*
     * In principle Zve*x would also suffice here, were they supported
     * in qemu
     */
    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
        return;
    }

    if (cpu->cfg.ext_zk) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkr), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkt), true);
    }

    if (cpu->cfg.ext_zkn) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkne), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknd), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknh), true);
    }

    if (cpu->cfg.ext_zks) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksed), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksh), true);
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}
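
/*
 * Illustrative examples of the checks above (assumed command lines):
 * '-cpu rv64,i=false' with E left disabled fails with "Either I or E
 * extension must be set", while '-cpu rv64,v=true' pulls in
 * Zve64d/Zve64f/Zve32f automatically via cpu_cfg_ext_auto_update()
 * before the Zve* dependency checks run.
 */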

#ifndef CONFIG_USER_ONLY
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif

static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool parent_enabled, profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec != env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->enabled = profile_impl;

    if (profile->parent != NULL) {
        parent_enabled = object_property_get_bool(OBJECT(cpu),
                                                  profile->parent->name,
                                                  NULL);
        profile->enabled = profile->enabled && parent_enabled;
    }
}

static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}

void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_validate_priv_spec(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

static bool riscv_cpu_is_vendor(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Error *local_err = NULL;

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

    riscv_cpu_validate_misa_mxl(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPURISCVState *env = &cpu->env;

    CPU(cs)->tcg_cflags |= CF_PCREL;

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return false;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_misa_ext_add_user_opt(misa_bit, value);

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (vendor_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
            /*
             * Note: the 'priv_spec' command line option, if present,
             * will take precedence over this priv_ver bump.
             */
            env->priv_ver = PRIV_VERSION_1_12_0;
        }
    }

    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
                                     misa_cfg->enabled);
        }
    }
}
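
/*
 * The properties registered above expose single-letter MISA bits as
 * booleans. As an illustration (assumed command line), something like
 * '-cpu rv64,v=true,h=false' toggles the V and H bits through
 * cpu_set_misa_ext_cfg() before realize time.
 */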

static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;
    int i, ext_offset;

    if (riscv_cpu_is_vendor(obj)) {
        error_setg(errp, "Profile %s is not available for vendor CPUs",
                   profile->name);
        return;
    }

    if (cpu->env.misa_mxl != MXL_RV64) {
        error_setg(errp, "Profile %s only available for 64 bit CPUs",
                   profile->name);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    profile->user_set = true;
    profile->enabled = value;

    if (profile->parent != NULL) {
        object_property_set_bool(obj, profile->parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->enabled) {
        cpu->env.priv_ver = profile->priv_spec;
    }

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        const char *satp_prop = satp_mode_str(profile->satp_mode,
                                              riscv_cpu_is_32bit(cpu));
        object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
    }
#endif

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (bit == RVI && !profile->enabled) {
            /*
             * Disabling profiles will not disable the base
             * ISA RV64I.
             */
            continue;
        }

        cpu_misa_ext_add_user_opt(bit, profile->enabled);
        riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        ext_offset = profile->ext_offsets[i];

        if (profile->enabled) {
            if (cpu_cfg_offset_is_named_feat(ext_offset)) {
                riscv_cpu_enable_named_feat(cpu, ext_offset);
            }

            cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
        }

        cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
        isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
    }
}

static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    bool value = profile->enabled;

    visit_type_bool(v, name, &value, errp);
}

static void riscv_cpu_add_profiles(Object *cpu_obj)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        const RISCVCPUProfile *profile = riscv_profiles[i];

        object_property_add(cpu_obj, profile->name, "bool",
                            cpu_get_profile, cpu_set_profile,
                            NULL, (void *)profile);

        /*
         * CPUs might enable a profile right from the start.
         * Enable its mandatory extensions right away in this
         * case.
         */
        if (profile->enabled) {
            object_property_set_bool(cpu_obj, profile->name, true, NULL);
        }
    }
}
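
/*
 * Profiles are exposed as boolean properties as well. A usage sketch,
 * assuming an RVA22U64 entry in riscv_profiles: '-cpu rv64,rva22u64=true'
 * enables every mandatory extension of that profile via
 * cpu_set_profile().
 */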

static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper(ext_name[0]);
}

/*
 * String will be allocated in the heap. Caller is responsible
 * for freeing it.
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_malloc0(strlen(ext_name) + 1);

    strcpy(ret, ext_name);
    ret[0] = tolower(ret[0]);

    return ret;
}

static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
        g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);

        warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
                    multi_ext_cfg->name, lower);
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
    bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu || deprecated_ext) {
        return;
    }

    /*
     * Set def val directly instead of using
     * object_property_set_bool() to save the set()
     * callback hash for user inputs.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                              const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);

    riscv_cpu_add_profiles(obj);

    for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) {
        qdev_property_add_static(DEVICE(obj), prop);
    }
}
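
/*
 * Taken together, the property arrays registered above allow command
 * lines such as (illustrative): '-cpu rv64,zbb=true,zbs=true,vlen=256'.
 * MISA bits, multi-letter extensions, profiles and CPU options all go
 * through the same QOM property mechanism.
 */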

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVG, RVJ and RVV that are disabled by default */
    riscv_cpu_set_misa(env, env->misa_mxl, env->misa_ext | RVG | RVJ | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /* set vector version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F. Disable it */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    }
}
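
/*
 * Usage note (illustrative): '-cpu max' therefore starts with all
 * ratified non-vendor extensions enabled, and individual extensions
 * can still be turned off on top of it, e.g. '-cpu max,zbb=false'.
 */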

static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
}

static void tcg_cpu_instance_init(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Object *obj = OBJECT(cpu);

    misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    riscv_cpu_add_user_properties(obj);

    if (riscv_cpu_has_max_extensions(obj)) {
        riscv_init_max_cpu_extensions(obj);
    }
}

static void tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /*
     * All cpus use the same set of operations.
     */
    cc->tcg_ops = &riscv_tcg_ops;
}

static void tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = tcg_cpu_init_ops;
}

static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_class_init = tcg_cpu_class_init;
    acc->cpu_instance_init = tcg_cpu_instance_init;
    acc->cpu_target_realize = tcg_cpu_realize;
}

static const TypeInfo tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = tcg_cpu_accel_class_init,
    .abstract = true,
};

static void tcg_cpu_accel_register_types(void)
{
    type_register_static(&tcg_cpu_accel_type_info);
}
type_init(tcg_cpu_accel_register_types);