target/riscv/tcg/tcg-cpu.c

/*
 * riscv TCG cpu class initialization
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "tcg-cpu.h"
#include "cpu.h"
#include "pmu.h"
#include "time_helper.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qemu/accel.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "hw/core/accel-cpu.h"
#include "hw/core/tcg-cpu-ops.h"
#include "tcg/tcg.h"

/* Hash tables that store user-set extensions */
static GHashTable *multi_ext_user_opts;
static GHashTable *misa_ext_user_opts;

static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset)
{
    return g_hash_table_contains(multi_ext_user_opts,
                                 GUINT_TO_POINTER(ext_offset));
}

static bool cpu_misa_ext_is_user_set(uint32_t misa_bit)
{
    return g_hash_table_contains(misa_ext_user_opts,
                                 GUINT_TO_POINTER(misa_bit));
}

static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value)
{
    g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset),
                        (gpointer)value);
}

static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value)
{
    g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit),
                        (gpointer)value);
}

static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit,
                                     bool enabled)
{
    CPURISCVState *env = &cpu->env;

    if (enabled) {
        env->misa_ext |= bit;
        env->misa_ext_mask |= bit;
    } else {
        env->misa_ext &= ~bit;
        env->misa_ext_mask &= ~bit;
    }
}

static const char *cpu_priv_ver_to_str(int priv_ver)
{
    switch (priv_ver) {
    case PRIV_VERSION_1_10_0:
        return "v1.10.0";
    case PRIV_VERSION_1_11_0:
        return "v1.11.0";
    case PRIV_VERSION_1_12_0:
        return "v1.12.0";
    }

    g_assert_not_reached();
}

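/*
 * Synchronize env->pc with the start of the given TB. Only needed when
 * PC-relative translation (CF_PCREL) is disabled: in that case tb->pc
 * holds the guest PC, sign-extended for RV32.
 */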
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
                                          const TranslationBlock *tb)
{
    if (!(tb_cflags(tb) & CF_PCREL)) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);

        tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL));

        if (xl == MXL_RV32) {
            env->pc = (int32_t) tb->pc;
        } else {
            env->pc = tb->pc;
        }
    }
}

static void riscv_restore_state_to_opc(CPUState *cs,
                                       const TranslationBlock *tb,
                                       const uint64_t *data)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
    target_ulong pc;

    if (tb_cflags(tb) & CF_PCREL) {
        pc = (env->pc & TARGET_PAGE_MASK) | data[0];
    } else {
        pc = data[0];
    }

    if (xl == MXL_RV32) {
        env->pc = (int32_t)pc;
    } else {
        env->pc = pc;
    }
    env->bins = data[1];
}

static const TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};

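/*
 * Return the minimum privileged spec version that introduced the
 * extension at the given cpu->cfg offset. Aborts if the offset is not
 * found in isa_edata_arr.
 */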
static int cpu_cfg_ext_get_min_version(uint32_t ext_offset)
{
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (edata->ext_enable_offset != ext_offset) {
            continue;
        }

        return edata->min_version;
    }

    g_assert_not_reached();
}

static const char *cpu_cfg_ext_get_name(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;
    const RISCVIsaExtData *edata;

    for (edata = isa_edata_arr; edata->name != NULL; edata++) {
        if (edata->ext_enable_offset == ext_offset) {
            return edata->name;
        }
    }

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return feat->name;
        }
    }

    g_assert_not_reached();
}

static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset)
{
    const RISCVCPUMultiExtConfig *feat;

    for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) {
        if (feat->offset == ext_offset) {
            return true;
        }
    }

    return false;
}

static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset)
{
    /*
     * All other named features are already enabled
     * in riscv_tcg_cpu_instance_init().
     */
    if (feat_offset == CPU_CFG_OFFSET(ext_zic64b)) {
        cpu->cfg.cbom_blocksize = 64;
        cpu->cfg.cbop_blocksize = 64;
        cpu->cfg.cboz_blocksize = 64;
    }
}

static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env,
                                        uint32_t ext_offset)
{
    int ext_priv_ver;

    if (env->priv_ver == PRIV_VERSION_LATEST) {
        return;
    }

    ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset);

    if (env->priv_ver < ext_priv_ver) {
        /*
         * Note: the 'priv_spec' command line option, if present,
         * will take precedence over this priv_ver bump.
         */
        env->priv_ver = ext_priv_ver;
    }
}

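/*
 * Enable or disable a multi-letter extension as a side effect of another
 * setting, but only if the user did not set it explicitly on the command
 * line and, when enabling, only if the current priv spec is recent enough.
 */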
static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset,
                                    bool value)
{
    CPURISCVState *env = &cpu->env;
    bool prev_val = isa_ext_is_enabled(cpu, ext_offset);
    int min_version;

    if (prev_val == value) {
        return;
    }

    if (cpu_cfg_ext_is_user_set(ext_offset)) {
        return;
    }

    if (value && env->priv_ver != PRIV_VERSION_LATEST) {
        /* Do not enable it if priv_ver is older than min_version */
        min_version = cpu_cfg_ext_get_min_version(ext_offset);
        if (env->priv_ver < min_version) {
            return;
        }
    }

    isa_ext_update_enabled(cpu, ext_offset, value);
}

static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
{
    if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
        error_setg(errp, "H extension requires priv spec 1.12.0");
        return;
    }
}

static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
                                 Error **errp)
{
    uint32_t vlen = cfg->vlenb << 3;

    if (vlen > RV_VLEN_MAX || vlen < 128) {
        error_setg(errp,
                   "Vector extension implementation only supports VLEN "
                   "in the range [128, %d]", RV_VLEN_MAX);
        return;
    }

    if (cfg->elen > 64 || cfg->elen < 8) {
        error_setg(errp,
                   "Vector extension implementation only supports ELEN "
                   "in the range [8, 64]");
        return;
    }
}

static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;
    const RISCVIsaExtData *edata;

    /* Force disable extensions if priv spec version does not match */
    for (edata = isa_edata_arr; edata && edata->name; edata++) {
        if (isa_ext_is_enabled(cpu, edata->ext_enable_offset) &&
            (env->priv_ver < edata->min_version)) {
            /*
             * These two extensions are always enabled as they were supported
             * by QEMU before they were added as extensions in the ISA.
             */
            if (!strcmp(edata->name, "zicntr") ||
                !strcmp(edata->name, "zihpm")) {
                continue;
            }

            isa_ext_update_enabled(cpu, edata->ext_enable_offset, false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        edata->name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        edata->name);
#endif
        }
    }
}

static void riscv_cpu_update_named_features(RISCVCPU *cpu)
{
    if (cpu->env.priv_ver >= PRIV_VERSION_1_11_0) {
        cpu->cfg.has_priv_1_11 = true;
    }

    if (cpu->env.priv_ver >= PRIV_VERSION_1_12_0) {
        cpu->cfg.has_priv_1_12 = true;
    }

    /* zic64b is 1.12 or later */
    cpu->cfg.ext_zic64b = cpu->cfg.cbom_blocksize == 64 &&
                          cpu->cfg.cbop_blocksize == 64 &&
                          cpu->cfg.cboz_blocksize == 64 &&
                          cpu->cfg.has_priv_1_12;
}

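/*
 * RVG expands to IMAFD plus Zicsr and Zifencei. Turn on anything the user
 * did not explicitly disable, and warn about explicitly disabled pieces
 * when RVG itself was requested on the command line.
 */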
static void riscv_cpu_validate_g(RISCVCPU *cpu)
{
    const char *warn_msg = "RVG mandates disabled extension %s";
    uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD};
    bool send_warn = cpu_misa_ext_is_user_set(RVG);

    for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) {
        uint32_t bit = g_misa_bits[i];

        if (riscv_has_ext(&cpu->env, bit)) {
            continue;
        }

        if (!cpu_misa_ext_is_user_set(bit)) {
            riscv_cpu_write_misa_bit(cpu, bit, true);
            continue;
        }

        if (send_warn) {
            warn_report(warn_msg, riscv_get_misa_ext_name(bit));
        }
    }

    if (!cpu->cfg.ext_zicsr) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) {
            cpu->cfg.ext_zicsr = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zicsr");
        }
    }

    if (!cpu->cfg.ext_zifencei) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) {
            cpu->cfg.ext_zifencei = true;
        } else if (send_warn) {
            warn_report(warn_msg, "zifencei");
        }
    }
}

static void riscv_cpu_validate_b(RISCVCPU *cpu)
{
    const char *warn_msg = "RVB mandates disabled extension %s";

    if (!cpu->cfg.ext_zba) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba))) {
            cpu->cfg.ext_zba = true;
        } else {
            warn_report(warn_msg, "zba");
        }
    }

    if (!cpu->cfg.ext_zbb) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb))) {
            cpu->cfg.ext_zbb = true;
        } else {
            warn_report(warn_msg, "zbb");
        }
    }

    if (!cpu->cfg.ext_zbs) {
        if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs))) {
            cpu->cfg.ext_zbs = true;
        } else {
            warn_report(warn_msg, "zbs");
        }
    }
}

/*
 * Check consistency between chosen extensions while setting
 * cpu->cfg accordingly.
 */
void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
{
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    if (riscv_has_ext(env, RVG)) {
        riscv_cpu_validate_g(cpu);
    }

    if (riscv_has_ext(env, RVB)) {
        riscv_cpu_validate_b(cpu);
    }

    if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "I and E extensions are incompatible");
        return;
    }

    if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
        error_setg(errp,
                   "Either I or E extension must be set");
        return;
    }

    if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
        error_setg(errp,
                   "Setting S extension without U extension is illegal");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
        error_setg(errp,
                   "H depends on an I base integer ISA with 32 x registers");
        return;
    }

    if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
        error_setg(errp, "H extension implicitly requires S-mode");
        return;
    }

    if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "F extension requires Zicsr");
        return;
    }

    if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zacas extension requires A extension");
        return;
    }

    if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
        error_setg(errp, "Zawrs extension requires A extension");
        return;
    }

    if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfa extension requires F extension");
        return;
    }

    if (cpu->cfg.ext_zfh) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zfhmin), true);
    }

    if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfh/Zfhmin extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zfbfmin extension depends on F extension");
        return;
    }

    if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "D extension requires F extension");
        return;
    }

    if (riscv_has_ext(env, RVV)) {
        riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return;
        }

        /* The V vector extension depends on the Zve64d extension */
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64d), true);
    }

    /* The Zve64d extension depends on the Zve64f extension */
    if (cpu->cfg.ext_zve64d) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve64f), true);
    }

    /* The Zve64f extension depends on the Zve32f extension */
    if (cpu->cfg.ext_zve64f) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zve32f), true);
    }

    if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
        error_setg(errp, "Zve64d/V extensions require D extension");
        return;
    }

    if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
        error_setg(errp, "Zve32f/Zve64f extensions require F extension");
        return;
    }

    if (cpu->cfg.ext_zvfh) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvfhmin), true);
    }

    if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
        error_setg(errp, "Zvfh extension requires Zfhmin extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
        error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
        return;
    }

    if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
        error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
        return;
    }

    /* Set the ISA extensions, checks should have happened above */
    if (cpu->cfg.ext_zhinx) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zhinxmin), true);
    }

    if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
        error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
        return;
    }

    if (cpu->cfg.ext_zfinx) {
        if (!cpu->cfg.ext_zicsr) {
            error_setg(errp, "Zfinx extension requires Zicsr");
            return;
        }
        if (riscv_has_ext(env, RVF)) {
            error_setg(errp,
                       "Zfinx cannot be supported together with F extension");
            return;
        }
    }

    if (cpu->cfg.ext_zce) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmp), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcmt), true);
        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
    }

    /* zca, zcd and zcf have a PRIV 1.12.0 restriction */
    if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zca), true);
        if (riscv_has_ext(env, RVF) && mcc->misa_mxl_max == MXL_RV32) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcf), true);
        }
        if (riscv_has_ext(env, RVD)) {
            cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zcd), true);
        }
    }

    if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension is only relevant to RV32");
        return;
    }

    if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
        error_setg(errp, "Zcf extension requires F extension");
        return;
    }

    if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
        error_setg(errp, "Zcd extension requires D extension");
        return;
    }

    if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
         cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
        error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
                         "extension");
        return;
    }

    if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
        error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
                         "Zcd extension");
        return;
    }

    if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_zicsr) {
        error_setg(errp, "Zcmt extension requires Zicsr extension");
        return;
    }

    /*
     * Shorthand vector crypto extensions
     */
    if (cpu->cfg.ext_zvknc) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    if (cpu->cfg.ext_zvkng) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
    }

    if (cpu->cfg.ext_zvkn) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkned), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvknhb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
    }

    if (cpu->cfg.ext_zvksc) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    if (cpu->cfg.ext_zvksg) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvks), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkg), true);
    }

    if (cpu->cfg.ext_zvks) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksed), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvksh), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvkt), true);
    }

    if (cpu->cfg.ext_zvkt) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zvbc), true);
    }

    /*
     * In principle Zve*x would also suffice here, were they supported
     * in qemu
     */
    if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkb || cpu->cfg.ext_zvkg ||
         cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed ||
         cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
        error_setg(errp,
                   "Vector crypto extensions require V or Zve* extensions");
        return;
    }

    if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) {
        error_setg(
            errp,
            "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
        return;
    }

    if (cpu->cfg.ext_zk) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkn), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkr), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkt), true);
    }

    if (cpu->cfg.ext_zkn) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zkne), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknd), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zknh), true);
    }

    if (cpu->cfg.ext_zks) {
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkb), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkc), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zbkx), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksed), true);
        cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zksh), true);
    }

    if (cpu->cfg.ext_zicntr && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr))) {
            error_setg(errp, "zicntr requires zicsr");
            return;
        }
        cpu->cfg.ext_zicntr = false;
    }

    if (cpu->cfg.ext_zihpm && !cpu->cfg.ext_zicsr) {
        if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm))) {
            error_setg(errp, "zihpm requires zicsr");
            return;
        }
        cpu->cfg.ext_zihpm = false;
    }

    if (!cpu->cfg.ext_zihpm) {
        cpu->cfg.pmu_mask = 0;
        cpu->pmu_avail_ctrs = 0;
    }

    /*
     * Disable isa extensions based on priv spec after we
     * validated and set everything we need.
     */
    riscv_cpu_disable_priv_spec_isa_exts(cpu);
}

#ifndef CONFIG_USER_ONLY
static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu,
                                            RISCVCPUProfile *profile,
                                            bool send_warn)
{
    int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported);

    if (profile->satp_mode > satp_max) {
        if (send_warn) {
            bool is_32bit = riscv_cpu_is_32bit(cpu);
            const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit);
            const char *cur_satp = satp_mode_str(satp_max, is_32bit);

            warn_report("Profile %s requires satp mode %s, "
                        "but satp mode %s was set", profile->name,
                        req_satp, cur_satp);
        }

        return false;
    }

    return true;
}
#endif

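/*
 * Mark a profile as enabled only if every requirement it lists (satp mode,
 * priv spec, MISA bits, multi-letter extensions and its parent profile) is
 * satisfied by the current configuration, warning when the user asked for
 * the profile explicitly.
 */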
static void riscv_cpu_validate_profile(RISCVCPU *cpu,
                                       RISCVCPUProfile *profile)
{
    CPURISCVState *env = &cpu->env;
    const char *warn_msg = "Profile %s mandates disabled extension %s";
    bool send_warn = profile->user_set && profile->enabled;
    bool parent_enabled, profile_impl = true;
    int i;

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        profile_impl = riscv_cpu_validate_profile_satp(cpu, profile,
                                                       send_warn);
    }
#endif

    if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED &&
        profile->priv_spec != env->priv_ver) {
        profile_impl = false;

        if (send_warn) {
            warn_report("Profile %s requires priv spec %s, "
                        "but priv ver %s was set", profile->name,
                        cpu_priv_ver_to_str(profile->priv_spec),
                        cpu_priv_ver_to_str(env->priv_ver));
        }
    }

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (!riscv_has_ext(&cpu->env, bit)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            riscv_get_misa_ext_name(bit));
            }
        }
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        int ext_offset = profile->ext_offsets[i];

        if (!isa_ext_is_enabled(cpu, ext_offset)) {
            profile_impl = false;

            if (send_warn) {
                warn_report(warn_msg, profile->name,
                            cpu_cfg_ext_get_name(ext_offset));
            }
        }
    }

    profile->enabled = profile_impl;

    if (profile->parent != NULL) {
        parent_enabled = object_property_get_bool(OBJECT(cpu),
                                                  profile->parent->name,
                                                  NULL);
        profile->enabled = profile->enabled && parent_enabled;
    }
}

static void riscv_cpu_validate_profiles(RISCVCPU *cpu)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        riscv_cpu_validate_profile(cpu, riscv_profiles[i]);
    }
}

void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
{
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_update_named_features(cpu);
    riscv_cpu_validate_profiles(cpu);

    if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: Smepmp requires PMP support");
        return;
    }

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

bool riscv_cpu_tcg_compatible(RISCVCPU *cpu)
{
    return object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST) == NULL;
}

static bool riscv_cpu_is_generic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}

/*
 * We'll get here via the following path:
 *
 * riscv_cpu_realize()
 *   -> cpu_exec_realizefn()
 *      -> tcg_cpu_realize() (via accel_cpu_common_realize())
 */
static bool riscv_tcg_cpu_realize(CPUState *cs, Error **errp)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!riscv_cpu_tcg_compatible(cpu)) {
        g_autofree char *name = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU is not compatible with TCG acceleration",
                   name);
        return false;
    }

#ifndef CONFIG_USER_ONLY
    CPURISCVState *env = &cpu->env;
    Error *local_err = NULL;

    tcg_cflags_set(CPU(cs), CF_PCREL);

    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }

    if (cpu->cfg.pmu_mask) {
        riscv_pmu_init(cpu, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            return false;
        }

        if (cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }

    /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
    if (riscv_has_ext(env, RVH)) {
        env->mideleg = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP | MIP_SGEIP;
    }
#endif

    return true;
}

typedef struct RISCVCPUMisaExtConfig {
    target_ulong misa_bit;
    bool enabled;
} RISCVCPUMisaExtConfig;

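/*
 * QOM setter for the single-letter (MISA) extension properties. Records
 * the user choice, refuses to enable extensions on vendor CPUs, and bumps
 * the priv spec when enabling RVH on an older configuration.
 */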
static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    cpu_misa_ext_add_user_opt(misa_bit, value);

    prev_val = env->misa_ext & misa_bit;

    if (value == prev_val) {
        return;
    }

    if (value) {
        if (vendor_cpu) {
            g_autofree char *cpuname = riscv_cpu_get_name(cpu);
            error_setg(errp, "'%s' CPU does not allow enabling extensions",
                       cpuname);
            return;
        }

        if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) {
            /*
             * Note: the 'priv_spec' command line option, if present,
             * will take precedence over this priv_ver bump.
             */
            env->priv_ver = PRIV_VERSION_1_12_0;
        }
    }

    riscv_cpu_write_misa_bit(cpu, misa_bit, value);
}

static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}

#define MISA_CFG(_bit, _enabled) \
    {.misa_bit = _bit, .enabled = _enabled}

static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    MISA_CFG(RVA, true),
    MISA_CFG(RVC, true),
    MISA_CFG(RVD, true),
    MISA_CFG(RVF, true),
    MISA_CFG(RVI, true),
    MISA_CFG(RVE, false),
    MISA_CFG(RVM, true),
    MISA_CFG(RVS, true),
    MISA_CFG(RVU, true),
    MISA_CFG(RVH, true),
    MISA_CFG(RVJ, false),
    MISA_CFG(RVV, false),
    MISA_CFG(RVG, false),
    MISA_CFG(RVB, false),
};

/*
 * We do not support user choice tracking for MISA
 * extensions yet because, so far, we do not silently
 * change MISA bits during realize() (RVG enables MISA
 * bits but the user is warned about it).
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    bool use_def_vals = riscv_cpu_is_generic(cpu_obj);
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
        int bit = misa_cfg->misa_bit;
        const char *name = riscv_get_misa_ext_name(bit);
        const char *desc = riscv_get_misa_ext_description(bit);

        /* Check if KVM already created the property */
        if (object_property_find(cpu_obj, name)) {
            continue;
        }

        object_property_add(cpu_obj, name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, name, desc);
        if (use_def_vals) {
            riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit,
                                     misa_cfg->enabled);
        }
    }
}

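/*
 * QOM setter for profile properties (e.g. rva22u64). Enabling a profile
 * also enables its parent profile, sets the priv spec and satp mode it
 * requires, and turns on every mandatory MISA bit and extension; disabling
 * it turns the same set off, except for the base RVI ISA.
 */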
static void cpu_set_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool value;
    int i, ext_offset;

    if (riscv_cpu_is_vendor(obj)) {
        error_setg(errp, "Profile %s is not available for vendor CPUs",
                   profile->name);
        return;
    }

    if (cpu->env.misa_mxl != MXL_RV64) {
        error_setg(errp, "Profile %s only available for 64 bit CPUs",
                   profile->name);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    profile->user_set = true;
    profile->enabled = value;

    if (profile->parent != NULL) {
        object_property_set_bool(obj, profile->parent->name,
                                 profile->enabled, NULL);
    }

    if (profile->enabled) {
        cpu->env.priv_ver = profile->priv_spec;
    }

#ifndef CONFIG_USER_ONLY
    if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) {
        object_property_set_bool(obj, "mmu", true, NULL);
        const char *satp_prop = satp_mode_str(profile->satp_mode,
                                              riscv_cpu_is_32bit(cpu));
        object_property_set_bool(obj, satp_prop, profile->enabled, NULL);
    }
#endif

    for (i = 0; misa_bits[i] != 0; i++) {
        uint32_t bit = misa_bits[i];

        if (!(profile->misa_ext & bit)) {
            continue;
        }

        if (bit == RVI && !profile->enabled) {
            /*
             * Disabling profiles will not disable the base
             * ISA RV64I.
             */
            continue;
        }

        cpu_misa_ext_add_user_opt(bit, profile->enabled);
        riscv_cpu_write_misa_bit(cpu, bit, profile->enabled);
    }

    for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        ext_offset = profile->ext_offsets[i];

        if (profile->enabled) {
            if (cpu_cfg_offset_is_named_feat(ext_offset)) {
                riscv_cpu_enable_named_feat(cpu, ext_offset);
            }

            cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset);
        }

        cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled);
        isa_ext_update_enabled(cpu, ext_offset, profile->enabled);
    }
}

static void cpu_get_profile(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    RISCVCPUProfile *profile = opaque;
    bool value = profile->enabled;

    visit_type_bool(v, name, &value, errp);
}

static void riscv_cpu_add_profiles(Object *cpu_obj)
{
    for (int i = 0; riscv_profiles[i] != NULL; i++) {
        const RISCVCPUProfile *profile = riscv_profiles[i];

        object_property_add(cpu_obj, profile->name, "bool",
                            cpu_get_profile, cpu_set_profile,
                            NULL, (void *)profile);

        /*
         * CPUs might enable a profile right from the start.
         * Enable its mandatory extensions right away in this
         * case.
         */
        if (profile->enabled) {
            object_property_set_bool(cpu_obj, profile->name, true, NULL);
        }
    }
}

static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper(ext_name[0]);
}

/*
 * String will be allocated on the heap. Caller is responsible
 * for freeing it.
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_malloc0(strlen(ext_name) + 1);

    strcpy(ret, ext_name);
    ret[0] = tolower(ret[0]);

    return ret;
}

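/*
 * QOM setter shared by all multi-letter extension properties. Warns when a
 * deprecated (capitalized) property name is used, records the user choice,
 * rejects enabling extensions on vendor CPUs and bumps the priv spec when
 * needed.
 */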
static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    RISCVCPU *cpu = RISCV_CPU(obj);
    bool vendor_cpu = riscv_cpu_is_vendor(obj);
    bool prev_val, value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (cpu_ext_is_deprecated(multi_ext_cfg->name)) {
        g_autofree char *lower = cpu_ext_to_lower(multi_ext_cfg->name);

        warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
                    multi_ext_cfg->name, lower);
    }

    cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value);

    prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset);

    if (value == prev_val) {
        return;
    }

    if (value && vendor_cpu) {
        g_autofree char *cpuname = riscv_cpu_get_name(cpu);
        error_setg(errp, "'%s' CPU does not allow enabling extensions",
                   cpuname);
        return;
    }

    if (value) {
        cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset);
    }

    isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value);
}

static void cpu_get_multi_ext_cfg(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque;
    bool value = isa_ext_is_enabled(RISCV_CPU(obj), multi_ext_cfg->offset);

    visit_type_bool(v, name, &value, errp);
}

static void cpu_add_multi_ext_prop(Object *cpu_obj,
                                   const RISCVCPUMultiExtConfig *multi_cfg)
{
    bool generic_cpu = riscv_cpu_is_generic(cpu_obj);
    bool deprecated_ext = cpu_ext_is_deprecated(multi_cfg->name);

    object_property_add(cpu_obj, multi_cfg->name, "bool",
                        cpu_get_multi_ext_cfg,
                        cpu_set_multi_ext_cfg,
                        NULL, (void *)multi_cfg);

    if (!generic_cpu || deprecated_ext) {
        return;
    }

    /*
     * Set the default value directly instead of using
     * object_property_set_bool(), so the set() callback does not
     * record it in the user options hash.
     */
    isa_ext_update_enabled(RISCV_CPU(cpu_obj), multi_cfg->offset,
                           multi_cfg->enabled);
}

static void riscv_cpu_add_multiext_prop_array(Object *obj,
                                              const RISCVCPUMultiExtConfig *array)
{
    const RISCVCPUMultiExtConfig *prop;

    g_assert(array);

    for (prop = array; prop && prop->name; prop++) {
        cpu_add_multi_ext_prop(obj, prop);
    }
}

/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif

    riscv_cpu_add_misa_properties(obj);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_extensions);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_vendor_exts);
    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_experimental_exts);

    riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts);

    riscv_cpu_add_profiles(obj);
}

/*
 * The 'max' type CPU will have all possible ratified
 * non-vendor extensions enabled.
 */
static void riscv_init_max_cpu_extensions(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    const RISCVCPUMultiExtConfig *prop;

    /* Enable RVG, RVJ and RVV that are disabled by default */
    riscv_cpu_set_misa_ext(env, env->misa_ext | RVG | RVJ | RVV);

    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        isa_ext_update_enabled(cpu, prop->offset, true);
    }

    /*
     * Some extensions can't be added without backward compatibility concerns.
     * Disable those, the user can still opt in to them on the command line.
     */
    cpu->cfg.ext_svade = false;

    /* set vector version */
    env->vext_ver = VEXT_VERSION_1_00_0;

    /* Zfinx is not compatible with F. Disable it */
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zfinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zdinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinx), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zhinxmin), false);

    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zce), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmp), false);
    isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcmt), false);

    if (env->misa_mxl != MXL_RV32) {
        isa_ext_update_enabled(cpu, CPU_CFG_OFFSET(ext_zcf), false);
    }
}

static bool riscv_cpu_has_max_extensions(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_CPU_MAX) != NULL;
}

static void riscv_tcg_cpu_instance_init(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    Object *obj = OBJECT(cpu);

    misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal);
    riscv_cpu_add_user_properties(obj);

    if (riscv_cpu_has_max_extensions(obj)) {
        riscv_init_max_cpu_extensions(obj);
    }
}

static void riscv_tcg_cpu_init_ops(AccelCPUClass *accel_cpu, CPUClass *cc)
{
    /*
     * All CPUs use the same set of operations.
     */
    cc->tcg_ops = &riscv_tcg_ops;
}

static void riscv_tcg_cpu_class_init(CPUClass *cc)
{
    cc->init_accel_cpu = riscv_tcg_cpu_init_ops;
}

static void riscv_tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
{
    AccelCPUClass *acc = ACCEL_CPU_CLASS(oc);

    acc->cpu_class_init = riscv_tcg_cpu_class_init;
    acc->cpu_instance_init = riscv_tcg_cpu_instance_init;
    acc->cpu_target_realize = riscv_tcg_cpu_realize;
}

static const TypeInfo riscv_tcg_cpu_accel_type_info = {
    .name = ACCEL_CPU_NAME("tcg"),

    .parent = TYPE_ACCEL_CPU,
    .class_init = riscv_tcg_cpu_accel_class_init,
    .abstract = true,
};

static void riscv_tcg_cpu_accel_register_types(void)
{
    type_register_static(&riscv_tcg_cpu_accel_type_info);
}
type_init(riscv_tcg_cpu_accel_register_types);