2 * riscv TCG cpu class initialization
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "exec/exec-all.h"
25 #include "time_helper.h"
26 #include "qapi/error.h"
27 #include "qapi/visitor.h"
28 #include "qemu/accel.h"
29 #include "qemu/error-report.h"
31 #include "hw/core/accel-cpu.h"
32 #include "hw/core/tcg-cpu-ops.h"
35 /* Hash that stores user set extensions */
36 static GHashTable
*multi_ext_user_opts
;
37 static GHashTable
*misa_ext_user_opts
;
39 static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset
)
41 return g_hash_table_contains(multi_ext_user_opts
,
42 GUINT_TO_POINTER(ext_offset
));
45 static bool cpu_misa_ext_is_user_set(uint32_t misa_bit
)
47 return g_hash_table_contains(misa_ext_user_opts
,
48 GUINT_TO_POINTER(misa_bit
));
51 static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset
, bool value
)
53 g_hash_table_insert(multi_ext_user_opts
, GUINT_TO_POINTER(ext_offset
),
57 static void cpu_misa_ext_add_user_opt(uint32_t bit
, bool value
)
59 g_hash_table_insert(misa_ext_user_opts
, GUINT_TO_POINTER(bit
),
63 static void riscv_cpu_write_misa_bit(RISCVCPU
*cpu
, uint32_t bit
,
66 CPURISCVState
*env
= &cpu
->env
;
70 env
->misa_ext_mask
|= bit
;
72 env
->misa_ext
&= ~bit
;
73 env
->misa_ext_mask
&= ~bit
;
77 static const char *cpu_priv_ver_to_str(int priv_ver
)
80 case PRIV_VERSION_1_10_0
:
82 case PRIV_VERSION_1_11_0
:
84 case PRIV_VERSION_1_12_0
:
88 g_assert_not_reached();
91 static void riscv_cpu_synchronize_from_tb(CPUState
*cs
,
92 const TranslationBlock
*tb
)
94 if (!(tb_cflags(tb
) & CF_PCREL
)) {
95 RISCVCPU
*cpu
= RISCV_CPU(cs
);
96 CPURISCVState
*env
= &cpu
->env
;
97 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
99 tcg_debug_assert(!tcg_cflags_has(cs
, CF_PCREL
));
101 if (xl
== MXL_RV32
) {
102 env
->pc
= (int32_t) tb
->pc
;
109 static void riscv_restore_state_to_opc(CPUState
*cs
,
110 const TranslationBlock
*tb
,
111 const uint64_t *data
)
113 RISCVCPU
*cpu
= RISCV_CPU(cs
);
114 CPURISCVState
*env
= &cpu
->env
;
115 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
118 if (tb_cflags(tb
) & CF_PCREL
) {
119 pc
= (env
->pc
& TARGET_PAGE_MASK
) | data
[0];
124 if (xl
== MXL_RV32
) {
125 env
->pc
= (int32_t)pc
;
132 static const TCGCPUOps riscv_tcg_ops
= {
133 .initialize
= riscv_translate_init
,
134 .synchronize_from_tb
= riscv_cpu_synchronize_from_tb
,
135 .restore_state_to_opc
= riscv_restore_state_to_opc
,
137 #ifndef CONFIG_USER_ONLY
138 .tlb_fill
= riscv_cpu_tlb_fill
,
139 .cpu_exec_interrupt
= riscv_cpu_exec_interrupt
,
140 .do_interrupt
= riscv_cpu_do_interrupt
,
141 .do_transaction_failed
= riscv_cpu_do_transaction_failed
,
142 .do_unaligned_access
= riscv_cpu_do_unaligned_access
,
143 .debug_excp_handler
= riscv_cpu_debug_excp_handler
,
144 .debug_check_breakpoint
= riscv_cpu_debug_check_breakpoint
,
145 .debug_check_watchpoint
= riscv_cpu_debug_check_watchpoint
,
146 #endif /* !CONFIG_USER_ONLY */
149 static int cpu_cfg_ext_get_min_version(uint32_t ext_offset
)
151 const RISCVIsaExtData
*edata
;
153 for (edata
= isa_edata_arr
; edata
&& edata
->name
; edata
++) {
154 if (edata
->ext_enable_offset
!= ext_offset
) {
158 return edata
->min_version
;
161 g_assert_not_reached();
164 static const char *cpu_cfg_ext_get_name(uint32_t ext_offset
)
166 const RISCVCPUMultiExtConfig
*feat
;
167 const RISCVIsaExtData
*edata
;
169 for (edata
= isa_edata_arr
; edata
->name
!= NULL
; edata
++) {
170 if (edata
->ext_enable_offset
== ext_offset
) {
175 for (feat
= riscv_cpu_named_features
; feat
->name
!= NULL
; feat
++) {
176 if (feat
->offset
== ext_offset
) {
181 g_assert_not_reached();
184 static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset
)
186 const RISCVCPUMultiExtConfig
*feat
;
188 for (feat
= riscv_cpu_named_features
; feat
->name
!= NULL
; feat
++) {
189 if (feat
->offset
== ext_offset
) {
197 static void riscv_cpu_enable_named_feat(RISCVCPU
*cpu
, uint32_t feat_offset
)
200 * All other named features are already enabled
201 * in riscv_tcg_cpu_instance_init().
203 if (feat_offset
== CPU_CFG_OFFSET(ext_zic64b
)) {
204 cpu
->cfg
.cbom_blocksize
= 64;
205 cpu
->cfg
.cbop_blocksize
= 64;
206 cpu
->cfg
.cboz_blocksize
= 64;
210 static void cpu_bump_multi_ext_priv_ver(CPURISCVState
*env
,
215 if (env
->priv_ver
== PRIV_VERSION_LATEST
) {
219 ext_priv_ver
= cpu_cfg_ext_get_min_version(ext_offset
);
221 if (env
->priv_ver
< ext_priv_ver
) {
223 * Note: the 'priv_spec' command line option, if present,
224 * will take precedence over this priv_ver bump.
226 env
->priv_ver
= ext_priv_ver
;
230 static void cpu_cfg_ext_auto_update(RISCVCPU
*cpu
, uint32_t ext_offset
,
233 CPURISCVState
*env
= &cpu
->env
;
234 bool prev_val
= isa_ext_is_enabled(cpu
, ext_offset
);
237 if (prev_val
== value
) {
241 if (cpu_cfg_ext_is_user_set(ext_offset
)) {
245 if (value
&& env
->priv_ver
!= PRIV_VERSION_LATEST
) {
246 /* Do not enable it if priv_ver is older than min_version */
247 min_version
= cpu_cfg_ext_get_min_version(ext_offset
);
248 if (env
->priv_ver
< min_version
) {
253 isa_ext_update_enabled(cpu
, ext_offset
, value
);
256 static void riscv_cpu_validate_misa_priv(CPURISCVState
*env
, Error
**errp
)
258 if (riscv_has_ext(env
, RVH
) && env
->priv_ver
< PRIV_VERSION_1_12_0
) {
259 error_setg(errp
, "H extension requires priv spec 1.12.0");
264 static void riscv_cpu_validate_v(CPURISCVState
*env
, RISCVCPUConfig
*cfg
,
267 uint32_t vlen
= cfg
->vlenb
<< 3;
269 if (vlen
> RV_VLEN_MAX
|| vlen
< 128) {
271 "Vector extension implementation only supports VLEN "
272 "in the range [128, %d]", RV_VLEN_MAX
);
276 if (cfg
->elen
> 64 || cfg
->elen
< 8) {
278 "Vector extension implementation only supports ELEN "
279 "in the range [8, 64]");
284 static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU
*cpu
)
286 CPURISCVState
*env
= &cpu
->env
;
287 const RISCVIsaExtData
*edata
;
289 /* Force disable extensions if priv spec version does not match */
290 for (edata
= isa_edata_arr
; edata
&& edata
->name
; edata
++) {
291 if (isa_ext_is_enabled(cpu
, edata
->ext_enable_offset
) &&
292 (env
->priv_ver
< edata
->min_version
)) {
294 * These two extensions are always enabled as they were supported
295 * by QEMU before they were added as extensions in the ISA.
297 if (!strcmp(edata
->name
, "zicntr") ||
298 !strcmp(edata
->name
, "zihpm")) {
302 isa_ext_update_enabled(cpu
, edata
->ext_enable_offset
, false);
303 #ifndef CONFIG_USER_ONLY
304 warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
305 " because privilege spec version does not match",
306 edata
->name
, env
->mhartid
);
308 warn_report("disabling %s extension because "
309 "privilege spec version does not match",
316 static void riscv_cpu_update_named_features(RISCVCPU
*cpu
)
318 if (cpu
->env
.priv_ver
>= PRIV_VERSION_1_11_0
) {
319 cpu
->cfg
.has_priv_1_11
= true;
322 if (cpu
->env
.priv_ver
>= PRIV_VERSION_1_12_0
) {
323 cpu
->cfg
.has_priv_1_12
= true;
326 /* zic64b is 1.12 or later */
327 cpu
->cfg
.ext_zic64b
= cpu
->cfg
.cbom_blocksize
== 64 &&
328 cpu
->cfg
.cbop_blocksize
== 64 &&
329 cpu
->cfg
.cboz_blocksize
== 64 &&
330 cpu
->cfg
.has_priv_1_12
;
333 static void riscv_cpu_validate_g(RISCVCPU
*cpu
)
335 const char *warn_msg
= "RVG mandates disabled extension %s";
336 uint32_t g_misa_bits
[] = {RVI
, RVM
, RVA
, RVF
, RVD
};
337 bool send_warn
= cpu_misa_ext_is_user_set(RVG
);
339 for (int i
= 0; i
< ARRAY_SIZE(g_misa_bits
); i
++) {
340 uint32_t bit
= g_misa_bits
[i
];
342 if (riscv_has_ext(&cpu
->env
, bit
)) {
346 if (!cpu_misa_ext_is_user_set(bit
)) {
347 riscv_cpu_write_misa_bit(cpu
, bit
, true);
352 warn_report(warn_msg
, riscv_get_misa_ext_name(bit
));
356 if (!cpu
->cfg
.ext_zicsr
) {
357 if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr
))) {
358 cpu
->cfg
.ext_zicsr
= true;
359 } else if (send_warn
) {
360 warn_report(warn_msg
, "zicsr");
364 if (!cpu
->cfg
.ext_zifencei
) {
365 if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei
))) {
366 cpu
->cfg
.ext_zifencei
= true;
367 } else if (send_warn
) {
368 warn_report(warn_msg
, "zifencei");
373 static void riscv_cpu_validate_b(RISCVCPU
*cpu
)
375 const char *warn_msg
= "RVB mandates disabled extension %s";
377 if (!cpu
->cfg
.ext_zba
) {
378 if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zba
))) {
379 cpu
->cfg
.ext_zba
= true;
381 warn_report(warn_msg
, "zba");
385 if (!cpu
->cfg
.ext_zbb
) {
386 if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbb
))) {
387 cpu
->cfg
.ext_zbb
= true;
389 warn_report(warn_msg
, "zbb");
393 if (!cpu
->cfg
.ext_zbs
) {
394 if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zbs
))) {
395 cpu
->cfg
.ext_zbs
= true;
397 warn_report(warn_msg
, "zbs");
403 * Check consistency between chosen extensions while setting
404 * cpu->cfg accordingly.
406 void riscv_cpu_validate_set_extensions(RISCVCPU
*cpu
, Error
**errp
)
408 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(cpu
);
409 CPURISCVState
*env
= &cpu
->env
;
410 Error
*local_err
= NULL
;
412 if (riscv_has_ext(env
, RVG
)) {
413 riscv_cpu_validate_g(cpu
);
416 if (riscv_has_ext(env
, RVB
)) {
417 riscv_cpu_validate_b(cpu
);
420 if (riscv_has_ext(env
, RVI
) && riscv_has_ext(env
, RVE
)) {
422 "I and E extensions are incompatible");
426 if (!riscv_has_ext(env
, RVI
) && !riscv_has_ext(env
, RVE
)) {
428 "Either I or E extension must be set");
432 if (riscv_has_ext(env
, RVS
) && !riscv_has_ext(env
, RVU
)) {
434 "Setting S extension without U extension is illegal");
438 if (riscv_has_ext(env
, RVH
) && !riscv_has_ext(env
, RVI
)) {
440 "H depends on an I base integer ISA with 32 x registers");
444 if (riscv_has_ext(env
, RVH
) && !riscv_has_ext(env
, RVS
)) {
445 error_setg(errp
, "H extension implicitly requires S-mode");
449 if (riscv_has_ext(env
, RVF
) && !cpu
->cfg
.ext_zicsr
) {
450 error_setg(errp
, "F extension requires Zicsr");
454 if ((cpu
->cfg
.ext_zacas
) && !riscv_has_ext(env
, RVA
)) {
455 error_setg(errp
, "Zacas extension requires A extension");
459 if ((cpu
->cfg
.ext_zawrs
) && !riscv_has_ext(env
, RVA
)) {
460 error_setg(errp
, "Zawrs extension requires A extension");
464 if (cpu
->cfg
.ext_zfa
&& !riscv_has_ext(env
, RVF
)) {
465 error_setg(errp
, "Zfa extension requires F extension");
469 if (cpu
->cfg
.ext_zfh
) {
470 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zfhmin
), true);
473 if (cpu
->cfg
.ext_zfhmin
&& !riscv_has_ext(env
, RVF
)) {
474 error_setg(errp
, "Zfh/Zfhmin extensions require F extension");
478 if (cpu
->cfg
.ext_zfbfmin
&& !riscv_has_ext(env
, RVF
)) {
479 error_setg(errp
, "Zfbfmin extension depends on F extension");
483 if (riscv_has_ext(env
, RVD
) && !riscv_has_ext(env
, RVF
)) {
484 error_setg(errp
, "D extension requires F extension");
488 if (riscv_has_ext(env
, RVV
)) {
489 riscv_cpu_validate_v(env
, &cpu
->cfg
, &local_err
);
490 if (local_err
!= NULL
) {
491 error_propagate(errp
, local_err
);
495 /* The V vector extension depends on the Zve64d extension */
496 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zve64d
), true);
499 /* The Zve64d extension depends on the Zve64f extension */
500 if (cpu
->cfg
.ext_zve64d
) {
501 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zve64f
), true);
504 /* The Zve64f extension depends on the Zve32f extension */
505 if (cpu
->cfg
.ext_zve64f
) {
506 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zve32f
), true);
509 if (cpu
->cfg
.ext_zve64d
&& !riscv_has_ext(env
, RVD
)) {
510 error_setg(errp
, "Zve64d/V extensions require D extension");
514 if (cpu
->cfg
.ext_zve32f
&& !riscv_has_ext(env
, RVF
)) {
515 error_setg(errp
, "Zve32f/Zve64f extensions require F extension");
519 if (cpu
->cfg
.ext_zvfh
) {
520 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvfhmin
), true);
523 if (cpu
->cfg
.ext_zvfhmin
&& !cpu
->cfg
.ext_zve32f
) {
524 error_setg(errp
, "Zvfh/Zvfhmin extensions require Zve32f extension");
528 if (cpu
->cfg
.ext_zvfh
&& !cpu
->cfg
.ext_zfhmin
) {
529 error_setg(errp
, "Zvfh extensions requires Zfhmin extension");
533 if (cpu
->cfg
.ext_zvfbfmin
&& !cpu
->cfg
.ext_zve32f
) {
534 error_setg(errp
, "Zvfbfmin extension depends on Zve32f extension");
538 if (cpu
->cfg
.ext_zvfbfwma
&& !cpu
->cfg
.ext_zvfbfmin
) {
539 error_setg(errp
, "Zvfbfwma extension depends on Zvfbfmin extension");
543 /* Set the ISA extensions, checks should have happened above */
544 if (cpu
->cfg
.ext_zhinx
) {
545 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zca
), true);
548 if ((cpu
->cfg
.ext_zdinx
|| cpu
->cfg
.ext_zhinxmin
) && !cpu
->cfg
.ext_zfinx
) {
549 error_setg(errp
, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
553 if (cpu
->cfg
.ext_zfinx
) {
554 if (!cpu
->cfg
.ext_zicsr
) {
555 error_setg(errp
, "Zfinx extension requires Zicsr");
558 if (riscv_has_ext(env
, RVF
)) {
560 "Zfinx cannot be supported together with F extension");
565 if (cpu
->cfg
.ext_zce
) {
566 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zca
), true);
567 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zcb
), true);
568 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zcmp
), true);
569 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zcmt
), true);
570 if (riscv_has_ext(env
, RVF
) && mcc
->misa_mxl_max
== MXL_RV32
) {
571 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zcf
), true);
575 /* zca, zcd and zcf has a PRIV 1.12.0 restriction */
576 if (riscv_has_ext(env
, RVC
) && env
->priv_ver
>= PRIV_VERSION_1_12_0
) {
577 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zca
), true);
578 if (riscv_has_ext(env
, RVF
) && mcc
->misa_mxl_max
== MXL_RV32
) {
579 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zcf
), true);
581 if (riscv_has_ext(env
, RVD
)) {
582 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zcd
), true);
586 if (mcc
->misa_mxl_max
!= MXL_RV32
&& cpu
->cfg
.ext_zcf
) {
587 error_setg(errp
, "Zcf extension is only relevant to RV32");
591 if (!riscv_has_ext(env
, RVF
) && cpu
->cfg
.ext_zcf
) {
592 error_setg(errp
, "Zcf extension requires F extension");
596 if (!riscv_has_ext(env
, RVD
) && cpu
->cfg
.ext_zcd
) {
597 error_setg(errp
, "Zcd extension requires D extension");
601 if ((cpu
->cfg
.ext_zcf
|| cpu
->cfg
.ext_zcd
|| cpu
->cfg
.ext_zcb
||
602 cpu
->cfg
.ext_zcmp
|| cpu
->cfg
.ext_zcmt
) && !cpu
->cfg
.ext_zca
) {
603 error_setg(errp
, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
608 if (cpu
->cfg
.ext_zcd
&& (cpu
->cfg
.ext_zcmp
|| cpu
->cfg
.ext_zcmt
)) {
609 error_setg(errp
, "Zcmp/Zcmt extensions are incompatible with "
614 if (cpu
->cfg
.ext_zcmt
&& !cpu
->cfg
.ext_zicsr
) {
615 error_setg(errp
, "Zcmt extension requires Zicsr extension");
620 * Shorthand vector crypto extensions
622 if (cpu
->cfg
.ext_zvknc
) {
623 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkn
), true);
624 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvbc
), true);
627 if (cpu
->cfg
.ext_zvkng
) {
628 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkn
), true);
629 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkg
), true);
632 if (cpu
->cfg
.ext_zvkn
) {
633 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkned
), true);
634 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvknhb
), true);
635 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkb
), true);
636 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkt
), true);
639 if (cpu
->cfg
.ext_zvksc
) {
640 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvks
), true);
641 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvbc
), true);
644 if (cpu
->cfg
.ext_zvksg
) {
645 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvks
), true);
646 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkg
), true);
649 if (cpu
->cfg
.ext_zvks
) {
650 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvksed
), true);
651 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvksh
), true);
652 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkb
), true);
653 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkt
), true);
656 if (cpu
->cfg
.ext_zvkt
) {
657 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvbb
), true);
658 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvbc
), true);
662 * In principle Zve*x would also suffice here, were they supported
665 if ((cpu
->cfg
.ext_zvbb
|| cpu
->cfg
.ext_zvkb
|| cpu
->cfg
.ext_zvkg
||
666 cpu
->cfg
.ext_zvkned
|| cpu
->cfg
.ext_zvknha
|| cpu
->cfg
.ext_zvksed
||
667 cpu
->cfg
.ext_zvksh
) && !cpu
->cfg
.ext_zve32f
) {
669 "Vector crypto extensions require V or Zve* extensions");
673 if ((cpu
->cfg
.ext_zvbc
|| cpu
->cfg
.ext_zvknhb
) && !cpu
->cfg
.ext_zve64f
) {
676 "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
680 if (cpu
->cfg
.ext_zk
) {
681 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zkn
), true);
682 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zkr
), true);
683 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zkt
), true);
686 if (cpu
->cfg
.ext_zkn
) {
687 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zbkb
), true);
688 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zbkc
), true);
689 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zbkx
), true);
690 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zkne
), true);
691 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zknd
), true);
692 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zknh
), true);
695 if (cpu
->cfg
.ext_zks
) {
696 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zbkb
), true);
697 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zbkc
), true);
698 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zbkx
), true);
699 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zksed
), true);
700 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zksh
), true);
703 if (cpu
->cfg
.ext_zicntr
&& !cpu
->cfg
.ext_zicsr
) {
704 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr
))) {
705 error_setg(errp
, "zicntr requires zicsr");
708 cpu
->cfg
.ext_zicntr
= false;
711 if (cpu
->cfg
.ext_zihpm
&& !cpu
->cfg
.ext_zicsr
) {
712 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm
))) {
713 error_setg(errp
, "zihpm requires zicsr");
716 cpu
->cfg
.ext_zihpm
= false;
719 if (!cpu
->cfg
.ext_zihpm
) {
720 cpu
->cfg
.pmu_mask
= 0;
721 cpu
->pmu_avail_ctrs
= 0;
725 * Disable isa extensions based on priv spec after we
726 * validated and set everything we need.
728 riscv_cpu_disable_priv_spec_isa_exts(cpu
);
731 #ifndef CONFIG_USER_ONLY
732 static bool riscv_cpu_validate_profile_satp(RISCVCPU
*cpu
,
733 RISCVCPUProfile
*profile
,
736 int satp_max
= satp_mode_max_from_map(cpu
->cfg
.satp_mode
.supported
);
738 if (profile
->satp_mode
> satp_max
) {
740 bool is_32bit
= riscv_cpu_is_32bit(cpu
);
741 const char *req_satp
= satp_mode_str(profile
->satp_mode
, is_32bit
);
742 const char *cur_satp
= satp_mode_str(satp_max
, is_32bit
);
744 warn_report("Profile %s requires satp mode %s, "
745 "but satp mode %s was set", profile
->name
,
756 static void riscv_cpu_validate_profile(RISCVCPU
*cpu
,
757 RISCVCPUProfile
*profile
)
759 CPURISCVState
*env
= &cpu
->env
;
760 const char *warn_msg
= "Profile %s mandates disabled extension %s";
761 bool send_warn
= profile
->user_set
&& profile
->enabled
;
762 bool parent_enabled
, profile_impl
= true;
765 #ifndef CONFIG_USER_ONLY
766 if (profile
->satp_mode
!= RISCV_PROFILE_ATTR_UNUSED
) {
767 profile_impl
= riscv_cpu_validate_profile_satp(cpu
, profile
,
772 if (profile
->priv_spec
!= RISCV_PROFILE_ATTR_UNUSED
&&
773 profile
->priv_spec
!= env
->priv_ver
) {
774 profile_impl
= false;
777 warn_report("Profile %s requires priv spec %s, "
778 "but priv ver %s was set", profile
->name
,
779 cpu_priv_ver_to_str(profile
->priv_spec
),
780 cpu_priv_ver_to_str(env
->priv_ver
));
784 for (i
= 0; misa_bits
[i
] != 0; i
++) {
785 uint32_t bit
= misa_bits
[i
];
787 if (!(profile
->misa_ext
& bit
)) {
791 if (!riscv_has_ext(&cpu
->env
, bit
)) {
792 profile_impl
= false;
795 warn_report(warn_msg
, profile
->name
,
796 riscv_get_misa_ext_name(bit
));
801 for (i
= 0; profile
->ext_offsets
[i
] != RISCV_PROFILE_EXT_LIST_END
; i
++) {
802 int ext_offset
= profile
->ext_offsets
[i
];
804 if (!isa_ext_is_enabled(cpu
, ext_offset
)) {
805 profile_impl
= false;
808 warn_report(warn_msg
, profile
->name
,
809 cpu_cfg_ext_get_name(ext_offset
));
814 profile
->enabled
= profile_impl
;
816 if (profile
->parent
!= NULL
) {
817 parent_enabled
= object_property_get_bool(OBJECT(cpu
),
818 profile
->parent
->name
,
820 profile
->enabled
= profile
->enabled
&& parent_enabled
;
824 static void riscv_cpu_validate_profiles(RISCVCPU
*cpu
)
826 for (int i
= 0; riscv_profiles
[i
] != NULL
; i
++) {
827 riscv_cpu_validate_profile(cpu
, riscv_profiles
[i
]);
831 void riscv_tcg_cpu_finalize_features(RISCVCPU
*cpu
, Error
**errp
)
833 CPURISCVState
*env
= &cpu
->env
;
834 Error
*local_err
= NULL
;
836 riscv_cpu_validate_misa_priv(env
, &local_err
);
837 if (local_err
!= NULL
) {
838 error_propagate(errp
, local_err
);
842 riscv_cpu_update_named_features(cpu
);
843 riscv_cpu_validate_profiles(cpu
);
845 if (cpu
->cfg
.ext_smepmp
&& !cpu
->cfg
.pmp
) {
847 * Enhanced PMP should only be available
848 * on harts with PMP support
850 error_setg(errp
, "Invalid configuration: Smepmp requires PMP support");
854 riscv_cpu_validate_set_extensions(cpu
, &local_err
);
855 if (local_err
!= NULL
) {
856 error_propagate(errp
, local_err
);
861 bool riscv_cpu_tcg_compatible(RISCVCPU
*cpu
)
863 return object_dynamic_cast(OBJECT(cpu
), TYPE_RISCV_CPU_HOST
) == NULL
;
866 static bool riscv_cpu_is_generic(Object
*cpu_obj
)
868 return object_dynamic_cast(cpu_obj
, TYPE_RISCV_DYNAMIC_CPU
) != NULL
;
872 * We'll get here via the following path:
874 * riscv_cpu_realize()
875 * -> cpu_exec_realizefn()
876 * -> tcg_cpu_realize() (via accel_cpu_common_realize())
878 static bool riscv_tcg_cpu_realize(CPUState
*cs
, Error
**errp
)
880 RISCVCPU
*cpu
= RISCV_CPU(cs
);
882 if (!riscv_cpu_tcg_compatible(cpu
)) {
883 g_autofree
char *name
= riscv_cpu_get_name(cpu
);
884 error_setg(errp
, "'%s' CPU is not compatible with TCG acceleration",
889 #ifndef CONFIG_USER_ONLY
890 CPURISCVState
*env
= &cpu
->env
;
891 Error
*local_err
= NULL
;
893 tcg_cflags_set(CPU(cs
), CF_PCREL
);
895 if (cpu
->cfg
.ext_sstc
) {
896 riscv_timer_init(cpu
);
899 if (cpu
->cfg
.pmu_mask
) {
900 riscv_pmu_init(cpu
, &local_err
);
901 if (local_err
!= NULL
) {
902 error_propagate(errp
, local_err
);
906 if (cpu
->cfg
.ext_sscofpmf
) {
907 cpu
->pmu_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
908 riscv_pmu_timer_cb
, cpu
);
912 /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
913 if (riscv_has_ext(env
, RVH
)) {
914 env
->mideleg
= MIP_VSSIP
| MIP_VSTIP
| MIP_VSEIP
| MIP_SGEIP
;
921 typedef struct RISCVCPUMisaExtConfig
{
922 target_ulong misa_bit
;
924 } RISCVCPUMisaExtConfig
;
926 static void cpu_set_misa_ext_cfg(Object
*obj
, Visitor
*v
, const char *name
,
927 void *opaque
, Error
**errp
)
929 const RISCVCPUMisaExtConfig
*misa_ext_cfg
= opaque
;
930 target_ulong misa_bit
= misa_ext_cfg
->misa_bit
;
931 RISCVCPU
*cpu
= RISCV_CPU(obj
);
932 CPURISCVState
*env
= &cpu
->env
;
933 bool vendor_cpu
= riscv_cpu_is_vendor(obj
);
934 bool prev_val
, value
;
936 if (!visit_type_bool(v
, name
, &value
, errp
)) {
940 cpu_misa_ext_add_user_opt(misa_bit
, value
);
942 prev_val
= env
->misa_ext
& misa_bit
;
944 if (value
== prev_val
) {
950 g_autofree
char *cpuname
= riscv_cpu_get_name(cpu
);
951 error_setg(errp
, "'%s' CPU does not allow enabling extensions",
956 if (misa_bit
== RVH
&& env
->priv_ver
< PRIV_VERSION_1_12_0
) {
958 * Note: the 'priv_spec' command line option, if present,
959 * will take precedence over this priv_ver bump.
961 env
->priv_ver
= PRIV_VERSION_1_12_0
;
965 riscv_cpu_write_misa_bit(cpu
, misa_bit
, value
);
968 static void cpu_get_misa_ext_cfg(Object
*obj
, Visitor
*v
, const char *name
,
969 void *opaque
, Error
**errp
)
971 const RISCVCPUMisaExtConfig
*misa_ext_cfg
= opaque
;
972 target_ulong misa_bit
= misa_ext_cfg
->misa_bit
;
973 RISCVCPU
*cpu
= RISCV_CPU(obj
);
974 CPURISCVState
*env
= &cpu
->env
;
977 value
= env
->misa_ext
& misa_bit
;
979 visit_type_bool(v
, name
, &value
, errp
);
982 #define MISA_CFG(_bit, _enabled) \
983 {.misa_bit = _bit, .enabled = _enabled}
985 static const RISCVCPUMisaExtConfig misa_ext_cfgs
[] = {
991 MISA_CFG(RVE
, false),
996 MISA_CFG(RVJ
, false),
997 MISA_CFG(RVV
, false),
998 MISA_CFG(RVG
, false),
999 MISA_CFG(RVB
, false),
1003 * We do not support user choice tracking for MISA
1004 * extensions yet because, so far, we do not silently
1005 * change MISA bits during realize() (RVG enables MISA
1006 * bits but the user is warned about it).
1008 static void riscv_cpu_add_misa_properties(Object
*cpu_obj
)
1010 bool use_def_vals
= riscv_cpu_is_generic(cpu_obj
);
1013 for (i
= 0; i
< ARRAY_SIZE(misa_ext_cfgs
); i
++) {
1014 const RISCVCPUMisaExtConfig
*misa_cfg
= &misa_ext_cfgs
[i
];
1015 int bit
= misa_cfg
->misa_bit
;
1016 const char *name
= riscv_get_misa_ext_name(bit
);
1017 const char *desc
= riscv_get_misa_ext_description(bit
);
1019 /* Check if KVM already created the property */
1020 if (object_property_find(cpu_obj
, name
)) {
1024 object_property_add(cpu_obj
, name
, "bool",
1025 cpu_get_misa_ext_cfg
,
1026 cpu_set_misa_ext_cfg
,
1027 NULL
, (void *)misa_cfg
);
1028 object_property_set_description(cpu_obj
, name
, desc
);
1030 riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj
), bit
,
1036 static void cpu_set_profile(Object
*obj
, Visitor
*v
, const char *name
,
1037 void *opaque
, Error
**errp
)
1039 RISCVCPUProfile
*profile
= opaque
;
1040 RISCVCPU
*cpu
= RISCV_CPU(obj
);
1044 if (riscv_cpu_is_vendor(obj
)) {
1045 error_setg(errp
, "Profile %s is not available for vendor CPUs",
1050 if (cpu
->env
.misa_mxl
!= MXL_RV64
) {
1051 error_setg(errp
, "Profile %s only available for 64 bit CPUs",
1056 if (!visit_type_bool(v
, name
, &value
, errp
)) {
1060 profile
->user_set
= true;
1061 profile
->enabled
= value
;
1063 if (profile
->parent
!= NULL
) {
1064 object_property_set_bool(obj
, profile
->parent
->name
,
1065 profile
->enabled
, NULL
);
1068 if (profile
->enabled
) {
1069 cpu
->env
.priv_ver
= profile
->priv_spec
;
1072 #ifndef CONFIG_USER_ONLY
1073 if (profile
->satp_mode
!= RISCV_PROFILE_ATTR_UNUSED
) {
1074 object_property_set_bool(obj
, "mmu", true, NULL
);
1075 const char *satp_prop
= satp_mode_str(profile
->satp_mode
,
1076 riscv_cpu_is_32bit(cpu
));
1077 object_property_set_bool(obj
, satp_prop
, profile
->enabled
, NULL
);
1081 for (i
= 0; misa_bits
[i
] != 0; i
++) {
1082 uint32_t bit
= misa_bits
[i
];
1084 if (!(profile
->misa_ext
& bit
)) {
1088 if (bit
== RVI
&& !profile
->enabled
) {
1090 * Disabling profiles will not disable the base
1096 cpu_misa_ext_add_user_opt(bit
, profile
->enabled
);
1097 riscv_cpu_write_misa_bit(cpu
, bit
, profile
->enabled
);
1100 for (i
= 0; profile
->ext_offsets
[i
] != RISCV_PROFILE_EXT_LIST_END
; i
++) {
1101 ext_offset
= profile
->ext_offsets
[i
];
1103 if (profile
->enabled
) {
1104 if (cpu_cfg_offset_is_named_feat(ext_offset
)) {
1105 riscv_cpu_enable_named_feat(cpu
, ext_offset
);
1108 cpu_bump_multi_ext_priv_ver(&cpu
->env
, ext_offset
);
1111 cpu_cfg_ext_add_user_opt(ext_offset
, profile
->enabled
);
1112 isa_ext_update_enabled(cpu
, ext_offset
, profile
->enabled
);
1116 static void cpu_get_profile(Object
*obj
, Visitor
*v
, const char *name
,
1117 void *opaque
, Error
**errp
)
1119 RISCVCPUProfile
*profile
= opaque
;
1120 bool value
= profile
->enabled
;
1122 visit_type_bool(v
, name
, &value
, errp
);
1125 static void riscv_cpu_add_profiles(Object
*cpu_obj
)
1127 for (int i
= 0; riscv_profiles
[i
] != NULL
; i
++) {
1128 const RISCVCPUProfile
*profile
= riscv_profiles
[i
];
1130 object_property_add(cpu_obj
, profile
->name
, "bool",
1131 cpu_get_profile
, cpu_set_profile
,
1132 NULL
, (void *)profile
);
1135 * CPUs might enable a profile right from the start.
1136 * Enable its mandatory extensions right away in this
1139 if (profile
->enabled
) {
1140 object_property_set_bool(cpu_obj
, profile
->name
, true, NULL
);
/*
 * Deprecated extension properties are spelled with an uppercase first
 * letter (e.g. "Zve32f" vs "zve32f").
 *
 * Cast to unsigned char before isupper(): passing a plain char whose
 * value is negative to a <ctype.h> function is undefined behavior
 * (CERT STR37-C).
 */
static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper((unsigned char)ext_name[0]);
}
/*
 * String will be allocated in the heap. Caller is responsible
 * for freeing it (g_free()).
 *
 * Returns a copy of ext_name with its first character lowercased.
 * g_strdup() replaces the previous g_malloc0() + strcpy() pair; the
 * unsigned char cast avoids undefined behavior when tolower() is
 * handed a negative plain-char value (CERT STR37-C).
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_strdup(ext_name);

    ret[0] = tolower((unsigned char)ret[0]);

    return ret;
}
1164 static void cpu_set_multi_ext_cfg(Object
*obj
, Visitor
*v
, const char *name
,
1165 void *opaque
, Error
**errp
)
1167 const RISCVCPUMultiExtConfig
*multi_ext_cfg
= opaque
;
1168 RISCVCPU
*cpu
= RISCV_CPU(obj
);
1169 bool vendor_cpu
= riscv_cpu_is_vendor(obj
);
1170 bool prev_val
, value
;
1172 if (!visit_type_bool(v
, name
, &value
, errp
)) {
1176 if (cpu_ext_is_deprecated(multi_ext_cfg
->name
)) {
1177 g_autofree
char *lower
= cpu_ext_to_lower(multi_ext_cfg
->name
);
1179 warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
1180 multi_ext_cfg
->name
, lower
);
1183 cpu_cfg_ext_add_user_opt(multi_ext_cfg
->offset
, value
);
1185 prev_val
= isa_ext_is_enabled(cpu
, multi_ext_cfg
->offset
);
1187 if (value
== prev_val
) {
1191 if (value
&& vendor_cpu
) {
1192 g_autofree
char *cpuname
= riscv_cpu_get_name(cpu
);
1193 error_setg(errp
, "'%s' CPU does not allow enabling extensions",
1199 cpu_bump_multi_ext_priv_ver(&cpu
->env
, multi_ext_cfg
->offset
);
1202 isa_ext_update_enabled(cpu
, multi_ext_cfg
->offset
, value
);
1205 static void cpu_get_multi_ext_cfg(Object
*obj
, Visitor
*v
, const char *name
,
1206 void *opaque
, Error
**errp
)
1208 const RISCVCPUMultiExtConfig
*multi_ext_cfg
= opaque
;
1209 bool value
= isa_ext_is_enabled(RISCV_CPU(obj
), multi_ext_cfg
->offset
);
1211 visit_type_bool(v
, name
, &value
, errp
);
1214 static void cpu_add_multi_ext_prop(Object
*cpu_obj
,
1215 const RISCVCPUMultiExtConfig
*multi_cfg
)
1217 bool generic_cpu
= riscv_cpu_is_generic(cpu_obj
);
1218 bool deprecated_ext
= cpu_ext_is_deprecated(multi_cfg
->name
);
1220 object_property_add(cpu_obj
, multi_cfg
->name
, "bool",
1221 cpu_get_multi_ext_cfg
,
1222 cpu_set_multi_ext_cfg
,
1223 NULL
, (void *)multi_cfg
);
1225 if (!generic_cpu
|| deprecated_ext
) {
1230 * Set def val directly instead of using
1231 * object_property_set_bool() to save the set()
1232 * callback hash for user inputs.
1234 isa_ext_update_enabled(RISCV_CPU(cpu_obj
), multi_cfg
->offset
,
1235 multi_cfg
->enabled
);
1238 static void riscv_cpu_add_multiext_prop_array(Object
*obj
,
1239 const RISCVCPUMultiExtConfig
*array
)
1241 const RISCVCPUMultiExtConfig
*prop
;
1245 for (prop
= array
; prop
&& prop
->name
; prop
++) {
1246 cpu_add_multi_ext_prop(obj
, prop
);
1251 * Add CPU properties with user-facing flags.
1253 * This will overwrite existing env->misa_ext values with the
1254 * defaults set via riscv_cpu_add_misa_properties().
1256 static void riscv_cpu_add_user_properties(Object
*obj
)
1258 #ifndef CONFIG_USER_ONLY
1259 riscv_add_satp_mode_properties(obj
);
1262 riscv_cpu_add_misa_properties(obj
);
1264 riscv_cpu_add_multiext_prop_array(obj
, riscv_cpu_extensions
);
1265 riscv_cpu_add_multiext_prop_array(obj
, riscv_cpu_vendor_exts
);
1266 riscv_cpu_add_multiext_prop_array(obj
, riscv_cpu_experimental_exts
);
1268 riscv_cpu_add_multiext_prop_array(obj
, riscv_cpu_deprecated_exts
);
1270 riscv_cpu_add_profiles(obj
);
1274 * The 'max' type CPU will have all possible ratified
1275 * non-vendor extensions enabled.
1277 static void riscv_init_max_cpu_extensions(Object
*obj
)
1279 RISCVCPU
*cpu
= RISCV_CPU(obj
);
1280 CPURISCVState
*env
= &cpu
->env
;
1281 const RISCVCPUMultiExtConfig
*prop
;
1283 /* Enable RVG, RVJ and RVV that are disabled by default */
1284 riscv_cpu_set_misa_ext(env
, env
->misa_ext
| RVG
| RVJ
| RVV
);
1286 for (prop
= riscv_cpu_extensions
; prop
&& prop
->name
; prop
++) {
1287 isa_ext_update_enabled(cpu
, prop
->offset
, true);
1291 * Some extensions can't be added without backward compatibilty concerns.
1292 * Disable those, the user can still opt in to them on the command line.
1294 cpu
->cfg
.ext_svade
= false;
1296 /* set vector version */
1297 env
->vext_ver
= VEXT_VERSION_1_00_0
;
1299 /* Zfinx is not compatible with F. Disable it */
1300 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zfinx
), false);
1301 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zdinx
), false);
1302 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zhinx
), false);
1303 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zhinxmin
), false);
1305 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zce
), false);
1306 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zcmp
), false);
1307 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zcmt
), false);
1309 if (env
->misa_mxl
!= MXL_RV32
) {
1310 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zcf
), false);
1314 static bool riscv_cpu_has_max_extensions(Object
*cpu_obj
)
1316 return object_dynamic_cast(cpu_obj
, TYPE_RISCV_CPU_MAX
) != NULL
;
1319 static void riscv_tcg_cpu_instance_init(CPUState
*cs
)
1321 RISCVCPU
*cpu
= RISCV_CPU(cs
);
1322 Object
*obj
= OBJECT(cpu
);
1324 misa_ext_user_opts
= g_hash_table_new(NULL
, g_direct_equal
);
1325 multi_ext_user_opts
= g_hash_table_new(NULL
, g_direct_equal
);
1326 riscv_cpu_add_user_properties(obj
);
1328 if (riscv_cpu_has_max_extensions(obj
)) {
1329 riscv_init_max_cpu_extensions(obj
);
1333 static void riscv_tcg_cpu_init_ops(AccelCPUClass
*accel_cpu
, CPUClass
*cc
)
1336 * All cpus use the same set of operations.
1338 cc
->tcg_ops
= &riscv_tcg_ops
;
1341 static void riscv_tcg_cpu_class_init(CPUClass
*cc
)
1343 cc
->init_accel_cpu
= riscv_tcg_cpu_init_ops
;
1346 static void riscv_tcg_cpu_accel_class_init(ObjectClass
*oc
, void *data
)
1348 AccelCPUClass
*acc
= ACCEL_CPU_CLASS(oc
);
1350 acc
->cpu_class_init
= riscv_tcg_cpu_class_init
;
1351 acc
->cpu_instance_init
= riscv_tcg_cpu_instance_init
;
1352 acc
->cpu_target_realize
= riscv_tcg_cpu_realize
;
1355 static const TypeInfo riscv_tcg_cpu_accel_type_info
= {
1356 .name
= ACCEL_CPU_NAME("tcg"),
1358 .parent
= TYPE_ACCEL_CPU
,
1359 .class_init
= riscv_tcg_cpu_accel_class_init
,
1363 static void riscv_tcg_cpu_accel_register_types(void)
1365 type_register_static(&riscv_tcg_cpu_accel_type_info
);
1367 type_init(riscv_tcg_cpu_accel_register_types
);