2 * riscv TCG cpu class initialization
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "exec/exec-all.h"
25 #include "time_helper.h"
26 #include "qapi/error.h"
27 #include "qapi/visitor.h"
28 #include "qemu/accel.h"
29 #include "qemu/error-report.h"
31 #include "hw/core/accel-cpu.h"
32 #include "hw/core/tcg-cpu-ops.h"
35 /* Hash that stores user set extensions */
36 static GHashTable
*multi_ext_user_opts
;
38 static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset
)
40 return g_hash_table_contains(multi_ext_user_opts
,
41 GUINT_TO_POINTER(ext_offset
));
44 static void riscv_cpu_synchronize_from_tb(CPUState
*cs
,
45 const TranslationBlock
*tb
)
47 if (!(tb_cflags(tb
) & CF_PCREL
)) {
48 RISCVCPU
*cpu
= RISCV_CPU(cs
);
49 CPURISCVState
*env
= &cpu
->env
;
50 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
52 tcg_debug_assert(!(cs
->tcg_cflags
& CF_PCREL
));
55 env
->pc
= (int32_t) tb
->pc
;
62 static void riscv_restore_state_to_opc(CPUState
*cs
,
63 const TranslationBlock
*tb
,
66 RISCVCPU
*cpu
= RISCV_CPU(cs
);
67 CPURISCVState
*env
= &cpu
->env
;
68 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
71 if (tb_cflags(tb
) & CF_PCREL
) {
72 pc
= (env
->pc
& TARGET_PAGE_MASK
) | data
[0];
78 env
->pc
= (int32_t)pc
;
85 static const struct TCGCPUOps riscv_tcg_ops
= {
86 .initialize
= riscv_translate_init
,
87 .synchronize_from_tb
= riscv_cpu_synchronize_from_tb
,
88 .restore_state_to_opc
= riscv_restore_state_to_opc
,
90 #ifndef CONFIG_USER_ONLY
91 .tlb_fill
= riscv_cpu_tlb_fill
,
92 .cpu_exec_interrupt
= riscv_cpu_exec_interrupt
,
93 .do_interrupt
= riscv_cpu_do_interrupt
,
94 .do_transaction_failed
= riscv_cpu_do_transaction_failed
,
95 .do_unaligned_access
= riscv_cpu_do_unaligned_access
,
96 .debug_excp_handler
= riscv_cpu_debug_excp_handler
,
97 .debug_check_breakpoint
= riscv_cpu_debug_check_breakpoint
,
98 .debug_check_watchpoint
= riscv_cpu_debug_check_watchpoint
,
99 #endif /* !CONFIG_USER_ONLY */
102 static int cpu_cfg_ext_get_min_version(uint32_t ext_offset
)
104 const RISCVIsaExtData
*edata
;
106 for (edata
= isa_edata_arr
; edata
&& edata
->name
; edata
++) {
107 if (edata
->ext_enable_offset
!= ext_offset
) {
111 return edata
->min_version
;
114 g_assert_not_reached();
117 static void cpu_cfg_ext_auto_update(RISCVCPU
*cpu
, uint32_t ext_offset
,
120 CPURISCVState
*env
= &cpu
->env
;
121 bool prev_val
= isa_ext_is_enabled(cpu
, ext_offset
);
124 if (prev_val
== value
) {
128 if (cpu_cfg_ext_is_user_set(ext_offset
)) {
132 if (value
&& env
->priv_ver
!= PRIV_VERSION_LATEST
) {
133 /* Do not enable it if priv_ver is older than min_version */
134 min_version
= cpu_cfg_ext_get_min_version(ext_offset
);
135 if (env
->priv_ver
< min_version
) {
140 isa_ext_update_enabled(cpu
, ext_offset
, value
);
143 static void riscv_cpu_validate_misa_priv(CPURISCVState
*env
, Error
**errp
)
145 if (riscv_has_ext(env
, RVH
) && env
->priv_ver
< PRIV_VERSION_1_12_0
) {
146 error_setg(errp
, "H extension requires priv spec 1.12.0");
151 static void riscv_cpu_validate_misa_mxl(RISCVCPU
*cpu
, Error
**errp
)
153 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(cpu
);
154 CPUClass
*cc
= CPU_CLASS(mcc
);
155 CPURISCVState
*env
= &cpu
->env
;
157 /* Validate that MISA_MXL is set properly. */
158 switch (env
->misa_mxl_max
) {
159 #ifdef TARGET_RISCV64
162 cc
->gdb_core_xml_file
= "riscv-64bit-cpu.xml";
166 cc
->gdb_core_xml_file
= "riscv-32bit-cpu.xml";
169 g_assert_not_reached();
172 if (env
->misa_mxl_max
!= env
->misa_mxl
) {
173 error_setg(errp
, "misa_mxl_max must be equal to misa_mxl");
178 static void riscv_cpu_validate_priv_spec(RISCVCPU
*cpu
, Error
**errp
)
180 CPURISCVState
*env
= &cpu
->env
;
181 int priv_version
= -1;
183 if (cpu
->cfg
.priv_spec
) {
184 if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.12.0")) {
185 priv_version
= PRIV_VERSION_1_12_0
;
186 } else if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.11.0")) {
187 priv_version
= PRIV_VERSION_1_11_0
;
188 } else if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.10.0")) {
189 priv_version
= PRIV_VERSION_1_10_0
;
192 "Unsupported privilege spec version '%s'",
197 env
->priv_ver
= priv_version
;
201 static void riscv_cpu_validate_v(CPURISCVState
*env
, RISCVCPUConfig
*cfg
,
204 if (!is_power_of_2(cfg
->vlen
)) {
205 error_setg(errp
, "Vector extension VLEN must be power of 2");
209 if (cfg
->vlen
> RV_VLEN_MAX
|| cfg
->vlen
< 128) {
211 "Vector extension implementation only supports VLEN "
212 "in the range [128, %d]", RV_VLEN_MAX
);
216 if (!is_power_of_2(cfg
->elen
)) {
217 error_setg(errp
, "Vector extension ELEN must be power of 2");
221 if (cfg
->elen
> 64 || cfg
->elen
< 8) {
223 "Vector extension implementation only supports ELEN "
224 "in the range [8, 64]");
228 if (cfg
->vext_spec
) {
229 if (!g_strcmp0(cfg
->vext_spec
, "v1.0")) {
230 env
->vext_ver
= VEXT_VERSION_1_00_0
;
232 error_setg(errp
, "Unsupported vector spec version '%s'",
236 } else if (env
->vext_ver
== 0) {
237 qemu_log("vector version is not specified, "
238 "use the default value v1.0\n");
240 env
->vext_ver
= VEXT_VERSION_1_00_0
;
244 static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU
*cpu
)
246 CPURISCVState
*env
= &cpu
->env
;
247 const RISCVIsaExtData
*edata
;
249 /* Force disable extensions if priv spec version does not match */
250 for (edata
= isa_edata_arr
; edata
&& edata
->name
; edata
++) {
251 if (isa_ext_is_enabled(cpu
, edata
->ext_enable_offset
) &&
252 (env
->priv_ver
< edata
->min_version
)) {
254 * These two extensions are always enabled as they were supported
255 * by QEMU before they were added as extensions in the ISA.
257 if (!strcmp(edata
->name
, "zicntr") ||
258 !strcmp(edata
->name
, "zihpm")) {
262 isa_ext_update_enabled(cpu
, edata
->ext_enable_offset
, false);
263 #ifndef CONFIG_USER_ONLY
264 warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
265 " because privilege spec version does not match",
266 edata
->name
, env
->mhartid
);
268 warn_report("disabling %s extension because "
269 "privilege spec version does not match",
277 * Check consistency between chosen extensions while setting
278 * cpu->cfg accordingly.
280 void riscv_cpu_validate_set_extensions(RISCVCPU
*cpu
, Error
**errp
)
282 CPURISCVState
*env
= &cpu
->env
;
283 Error
*local_err
= NULL
;
285 /* Do some ISA extension error checking */
286 if (riscv_has_ext(env
, RVG
) &&
287 !(riscv_has_ext(env
, RVI
) && riscv_has_ext(env
, RVM
) &&
288 riscv_has_ext(env
, RVA
) && riscv_has_ext(env
, RVF
) &&
289 riscv_has_ext(env
, RVD
) &&
290 cpu
->cfg
.ext_zicsr
&& cpu
->cfg
.ext_zifencei
)) {
292 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr
)) &&
293 !cpu
->cfg
.ext_zicsr
) {
294 error_setg(errp
, "RVG requires Zicsr but user set Zicsr to false");
298 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei
)) &&
299 !cpu
->cfg
.ext_zifencei
) {
300 error_setg(errp
, "RVG requires Zifencei but user set "
301 "Zifencei to false");
305 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zicsr
), true);
306 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zifencei
), true);
308 env
->misa_ext
|= RVI
| RVM
| RVA
| RVF
| RVD
;
309 env
->misa_ext_mask
|= RVI
| RVM
| RVA
| RVF
| RVD
;
312 if (riscv_has_ext(env
, RVI
) && riscv_has_ext(env
, RVE
)) {
314 "I and E extensions are incompatible");
318 if (!riscv_has_ext(env
, RVI
) && !riscv_has_ext(env
, RVE
)) {
320 "Either I or E extension must be set");
324 if (riscv_has_ext(env
, RVS
) && !riscv_has_ext(env
, RVU
)) {
326 "Setting S extension without U extension is illegal");
330 if (riscv_has_ext(env
, RVH
) && !riscv_has_ext(env
, RVI
)) {
332 "H depends on an I base integer ISA with 32 x registers");
336 if (riscv_has_ext(env
, RVH
) && !riscv_has_ext(env
, RVS
)) {
337 error_setg(errp
, "H extension implicitly requires S-mode");
341 if (riscv_has_ext(env
, RVF
) && !cpu
->cfg
.ext_zicsr
) {
342 error_setg(errp
, "F extension requires Zicsr");
346 if ((cpu
->cfg
.ext_zawrs
) && !riscv_has_ext(env
, RVA
)) {
347 error_setg(errp
, "Zawrs extension requires A extension");
351 if (cpu
->cfg
.ext_zfa
&& !riscv_has_ext(env
, RVF
)) {
352 error_setg(errp
, "Zfa extension requires F extension");
356 if (cpu
->cfg
.ext_zfh
) {
357 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zfhmin
), true);
360 if (cpu
->cfg
.ext_zfhmin
&& !riscv_has_ext(env
, RVF
)) {
361 error_setg(errp
, "Zfh/Zfhmin extensions require F extension");
365 if (cpu
->cfg
.ext_zfbfmin
&& !riscv_has_ext(env
, RVF
)) {
366 error_setg(errp
, "Zfbfmin extension depends on F extension");
370 if (riscv_has_ext(env
, RVD
) && !riscv_has_ext(env
, RVF
)) {
371 error_setg(errp
, "D extension requires F extension");
375 if (riscv_has_ext(env
, RVV
)) {
376 riscv_cpu_validate_v(env
, &cpu
->cfg
, &local_err
);
377 if (local_err
!= NULL
) {
378 error_propagate(errp
, local_err
);
382 /* The V vector extension depends on the Zve64d extension */
383 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zve64d
), true);
386 /* The Zve64d extension depends on the Zve64f extension */
387 if (cpu
->cfg
.ext_zve64d
) {
388 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zve64f
), true);
391 /* The Zve64f extension depends on the Zve32f extension */
392 if (cpu
->cfg
.ext_zve64f
) {
393 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zve32f
), true);
396 if (cpu
->cfg
.ext_zve64d
&& !riscv_has_ext(env
, RVD
)) {
397 error_setg(errp
, "Zve64d/V extensions require D extension");
401 if (cpu
->cfg
.ext_zve32f
&& !riscv_has_ext(env
, RVF
)) {
402 error_setg(errp
, "Zve32f/Zve64f extensions require F extension");
406 if (cpu
->cfg
.ext_zvfh
) {
407 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvfhmin
), true);
410 if (cpu
->cfg
.ext_zvfhmin
&& !cpu
->cfg
.ext_zve32f
) {
411 error_setg(errp
, "Zvfh/Zvfhmin extensions require Zve32f extension");
415 if (cpu
->cfg
.ext_zvfh
&& !cpu
->cfg
.ext_zfhmin
) {
416 error_setg(errp
, "Zvfh extensions requires Zfhmin extension");
420 if (cpu
->cfg
.ext_zvfbfmin
&& !cpu
->cfg
.ext_zfbfmin
) {
421 error_setg(errp
, "Zvfbfmin extension depends on Zfbfmin extension");
425 if (cpu
->cfg
.ext_zvfbfmin
&& !cpu
->cfg
.ext_zve32f
) {
426 error_setg(errp
, "Zvfbfmin extension depends on Zve32f extension");
430 if (cpu
->cfg
.ext_zvfbfwma
&& !cpu
->cfg
.ext_zvfbfmin
) {
431 error_setg(errp
, "Zvfbfwma extension depends on Zvfbfmin extension");
435 /* Set the ISA extensions, checks should have happened above */
436 if (cpu
->cfg
.ext_zhinx
) {
437 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zca
), true);
440 if ((cpu
->cfg
.ext_zdinx
|| cpu
->cfg
.ext_zhinxmin
) && !cpu
->cfg
.ext_zfinx
) {
441 error_setg(errp
, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
445 if (cpu
->cfg
.ext_zfinx
) {
446 if (!cpu
->cfg
.ext_zicsr
) {
447 error_setg(errp
, "Zfinx extension requires Zicsr");
450 if (riscv_has_ext(env
, RVF
)) {
452 "Zfinx cannot be supported together with F extension");
457 if (cpu
->cfg
.ext_zce
) {
458 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zca
), true);
459 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zcb
), true);
460 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zcmp
), true);
461 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zcmt
), true);
462 if (riscv_has_ext(env
, RVF
) && env
->misa_mxl_max
== MXL_RV32
) {
463 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zcf
), true);
467 /* zca, zcd and zcf has a PRIV 1.12.0 restriction */
468 if (riscv_has_ext(env
, RVC
) && env
->priv_ver
>= PRIV_VERSION_1_12_0
) {
469 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zca
), true);
470 if (riscv_has_ext(env
, RVF
) && env
->misa_mxl_max
== MXL_RV32
) {
471 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zcf
), true);
473 if (riscv_has_ext(env
, RVD
)) {
474 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zcd
), true);
478 if (env
->misa_mxl_max
!= MXL_RV32
&& cpu
->cfg
.ext_zcf
) {
479 error_setg(errp
, "Zcf extension is only relevant to RV32");
483 if (!riscv_has_ext(env
, RVF
) && cpu
->cfg
.ext_zcf
) {
484 error_setg(errp
, "Zcf extension requires F extension");
488 if (!riscv_has_ext(env
, RVD
) && cpu
->cfg
.ext_zcd
) {
489 error_setg(errp
, "Zcd extension requires D extension");
493 if ((cpu
->cfg
.ext_zcf
|| cpu
->cfg
.ext_zcd
|| cpu
->cfg
.ext_zcb
||
494 cpu
->cfg
.ext_zcmp
|| cpu
->cfg
.ext_zcmt
) && !cpu
->cfg
.ext_zca
) {
495 error_setg(errp
, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
500 if (cpu
->cfg
.ext_zcd
&& (cpu
->cfg
.ext_zcmp
|| cpu
->cfg
.ext_zcmt
)) {
501 error_setg(errp
, "Zcmp/Zcmt extensions are incompatible with "
506 if (cpu
->cfg
.ext_zcmt
&& !cpu
->cfg
.ext_zicsr
) {
507 error_setg(errp
, "Zcmt extension requires Zicsr extension");
512 * Shorthand vector crypto extensions
514 if (cpu
->cfg
.ext_zvknc
) {
515 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkn
), true);
516 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvbc
), true);
519 if (cpu
->cfg
.ext_zvkng
) {
520 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkn
), true);
521 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkg
), true);
524 if (cpu
->cfg
.ext_zvkn
) {
525 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkned
), true);
526 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvknhb
), true);
527 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkb
), true);
528 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkt
), true);
531 if (cpu
->cfg
.ext_zvksc
) {
532 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvks
), true);
533 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvbc
), true);
536 if (cpu
->cfg
.ext_zvksg
) {
537 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvks
), true);
538 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkg
), true);
541 if (cpu
->cfg
.ext_zvks
) {
542 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvksed
), true);
543 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvksh
), true);
544 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkb
), true);
545 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvkt
), true);
548 if (cpu
->cfg
.ext_zvkt
) {
549 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvbb
), true);
550 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zvbc
), true);
554 * In principle Zve*x would also suffice here, were they supported
557 if ((cpu
->cfg
.ext_zvbb
|| cpu
->cfg
.ext_zvkb
|| cpu
->cfg
.ext_zvkg
||
558 cpu
->cfg
.ext_zvkned
|| cpu
->cfg
.ext_zvknha
|| cpu
->cfg
.ext_zvksed
||
559 cpu
->cfg
.ext_zvksh
) && !cpu
->cfg
.ext_zve32f
) {
561 "Vector crypto extensions require V or Zve* extensions");
565 if ((cpu
->cfg
.ext_zvbc
|| cpu
->cfg
.ext_zvknhb
) && !cpu
->cfg
.ext_zve64f
) {
568 "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
572 if (cpu
->cfg
.ext_zk
) {
573 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zkn
), true);
574 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zkr
), true);
575 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zkt
), true);
578 if (cpu
->cfg
.ext_zkn
) {
579 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zbkb
), true);
580 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zbkc
), true);
581 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zbkx
), true);
582 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zkne
), true);
583 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zknd
), true);
584 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zknh
), true);
587 if (cpu
->cfg
.ext_zks
) {
588 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zbkb
), true);
589 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zbkc
), true);
590 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zbkx
), true);
591 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zksed
), true);
592 cpu_cfg_ext_auto_update(cpu
, CPU_CFG_OFFSET(ext_zksh
), true);
595 if (cpu
->cfg
.ext_zicntr
&& !cpu
->cfg
.ext_zicsr
) {
596 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicntr
))) {
597 error_setg(errp
, "zicntr requires zicsr");
600 cpu
->cfg
.ext_zicntr
= false;
603 if (cpu
->cfg
.ext_zihpm
&& !cpu
->cfg
.ext_zicsr
) {
604 if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zihpm
))) {
605 error_setg(errp
, "zihpm requires zicsr");
608 cpu
->cfg
.ext_zihpm
= false;
611 if (!cpu
->cfg
.ext_zihpm
) {
612 cpu
->cfg
.pmu_mask
= 0;
613 cpu
->pmu_avail_ctrs
= 0;
617 * Disable isa extensions based on priv spec after we
618 * validated and set everything we need.
620 riscv_cpu_disable_priv_spec_isa_exts(cpu
);
623 void riscv_tcg_cpu_finalize_features(RISCVCPU
*cpu
, Error
**errp
)
625 CPURISCVState
*env
= &cpu
->env
;
626 Error
*local_err
= NULL
;
628 riscv_cpu_validate_priv_spec(cpu
, &local_err
);
629 if (local_err
!= NULL
) {
630 error_propagate(errp
, local_err
);
634 riscv_cpu_validate_misa_priv(env
, &local_err
);
635 if (local_err
!= NULL
) {
636 error_propagate(errp
, local_err
);
640 if (cpu
->cfg
.ext_smepmp
&& !cpu
->cfg
.pmp
) {
642 * Enhanced PMP should only be available
643 * on harts with PMP support
645 error_setg(errp
, "Invalid configuration: Smepmp requires PMP support");
649 riscv_cpu_validate_set_extensions(cpu
, &local_err
);
650 if (local_err
!= NULL
) {
651 error_propagate(errp
, local_err
);
656 bool riscv_cpu_tcg_compatible(RISCVCPU
*cpu
)
658 return object_dynamic_cast(OBJECT(cpu
), TYPE_RISCV_CPU_HOST
) == NULL
;
661 static bool riscv_cpu_is_generic(Object
*cpu_obj
)
663 return object_dynamic_cast(cpu_obj
, TYPE_RISCV_DYNAMIC_CPU
) != NULL
;
667 * We'll get here via the following path:
669 * riscv_cpu_realize()
670 * -> cpu_exec_realizefn()
671 * -> tcg_cpu_realize() (via accel_cpu_common_realize())
673 static bool tcg_cpu_realize(CPUState
*cs
, Error
**errp
)
675 RISCVCPU
*cpu
= RISCV_CPU(cs
);
676 Error
*local_err
= NULL
;
678 if (!riscv_cpu_tcg_compatible(cpu
)) {
679 g_autofree
char *name
= riscv_cpu_get_name(cpu
);
680 error_setg(errp
, "'%s' CPU is not compatible with TCG acceleration",
685 riscv_cpu_validate_misa_mxl(cpu
, &local_err
);
686 if (local_err
!= NULL
) {
687 error_propagate(errp
, local_err
);
691 #ifndef CONFIG_USER_ONLY
692 CPURISCVState
*env
= &cpu
->env
;
694 CPU(cs
)->tcg_cflags
|= CF_PCREL
;
696 if (cpu
->cfg
.ext_sstc
) {
697 riscv_timer_init(cpu
);
700 if (cpu
->cfg
.pmu_mask
) {
701 riscv_pmu_init(cpu
, &local_err
);
702 if (local_err
!= NULL
) {
703 error_propagate(errp
, local_err
);
707 if (cpu
->cfg
.ext_sscofpmf
) {
708 cpu
->pmu_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
709 riscv_pmu_timer_cb
, cpu
);
713 /* With H-Ext, VSSIP, VSTIP, VSEIP and SGEIP are hardwired to one. */
714 if (riscv_has_ext(env
, RVH
)) {
715 env
->mideleg
= MIP_VSSIP
| MIP_VSTIP
| MIP_VSEIP
| MIP_SGEIP
;
722 typedef struct RISCVCPUMisaExtConfig
{
723 target_ulong misa_bit
;
725 } RISCVCPUMisaExtConfig
;
727 static void cpu_set_misa_ext_cfg(Object
*obj
, Visitor
*v
, const char *name
,
728 void *opaque
, Error
**errp
)
730 const RISCVCPUMisaExtConfig
*misa_ext_cfg
= opaque
;
731 target_ulong misa_bit
= misa_ext_cfg
->misa_bit
;
732 RISCVCPU
*cpu
= RISCV_CPU(obj
);
733 CPURISCVState
*env
= &cpu
->env
;
734 bool generic_cpu
= riscv_cpu_is_generic(obj
);
735 bool prev_val
, value
;
737 if (!visit_type_bool(v
, name
, &value
, errp
)) {
741 prev_val
= env
->misa_ext
& misa_bit
;
743 if (value
== prev_val
) {
749 g_autofree
char *cpuname
= riscv_cpu_get_name(cpu
);
750 error_setg(errp
, "'%s' CPU does not allow enabling extensions",
755 env
->misa_ext
|= misa_bit
;
756 env
->misa_ext_mask
|= misa_bit
;
758 env
->misa_ext
&= ~misa_bit
;
759 env
->misa_ext_mask
&= ~misa_bit
;
763 static void cpu_get_misa_ext_cfg(Object
*obj
, Visitor
*v
, const char *name
,
764 void *opaque
, Error
**errp
)
766 const RISCVCPUMisaExtConfig
*misa_ext_cfg
= opaque
;
767 target_ulong misa_bit
= misa_ext_cfg
->misa_bit
;
768 RISCVCPU
*cpu
= RISCV_CPU(obj
);
769 CPURISCVState
*env
= &cpu
->env
;
772 value
= env
->misa_ext
& misa_bit
;
774 visit_type_bool(v
, name
, &value
, errp
);
777 #define MISA_CFG(_bit, _enabled) \
778 {.misa_bit = _bit, .enabled = _enabled}
780 static const RISCVCPUMisaExtConfig misa_ext_cfgs
[] = {
786 MISA_CFG(RVE
, false),
791 MISA_CFG(RVJ
, false),
792 MISA_CFG(RVV
, false),
793 MISA_CFG(RVG
, false),
797 * We do not support user choice tracking for MISA
798 * extensions yet because, so far, we do not silently
799 * change MISA bits during realize() (RVG enables MISA
800 * bits but the user is warned about it).
802 static void riscv_cpu_add_misa_properties(Object
*cpu_obj
)
804 bool use_def_vals
= riscv_cpu_is_generic(cpu_obj
);
807 for (i
= 0; i
< ARRAY_SIZE(misa_ext_cfgs
); i
++) {
808 const RISCVCPUMisaExtConfig
*misa_cfg
= &misa_ext_cfgs
[i
];
809 int bit
= misa_cfg
->misa_bit
;
810 const char *name
= riscv_get_misa_ext_name(bit
);
811 const char *desc
= riscv_get_misa_ext_description(bit
);
813 /* Check if KVM already created the property */
814 if (object_property_find(cpu_obj
, name
)) {
818 object_property_add(cpu_obj
, name
, "bool",
819 cpu_get_misa_ext_cfg
,
820 cpu_set_misa_ext_cfg
,
821 NULL
, (void *)misa_cfg
);
822 object_property_set_description(cpu_obj
, name
, desc
);
824 object_property_set_bool(cpu_obj
, name
, misa_cfg
->enabled
, NULL
);
/*
 * Deprecated extension property names start with an upper-case letter
 * (e.g. vendor "X*" aliases).  Cast to unsigned char before calling
 * isupper(): passing a plain (possibly negative) char is undefined
 * behavior per the C standard / CERT STR37-C.
 */
static bool cpu_ext_is_deprecated(const char *ext_name)
{
    return isupper((unsigned char)ext_name[0]);
}
/*
 * Return a copy of 'ext_name' with its first character lower-cased.
 * String will be allocated in the heap. Caller is responsible
 * for freeing it (g_free()).
 *
 * Fix: tolower() must receive an unsigned char value; passing a plain
 * (possibly negative) char is undefined behavior (CERT STR37-C).
 */
static char *cpu_ext_to_lower(const char *ext_name)
{
    char *ret = g_malloc0(strlen(ext_name) + 1);

    strcpy(ret, ext_name);
    ret[0] = tolower((unsigned char)ret[0]);

    return ret;
}
848 static void cpu_set_multi_ext_cfg(Object
*obj
, Visitor
*v
, const char *name
,
849 void *opaque
, Error
**errp
)
851 const RISCVCPUMultiExtConfig
*multi_ext_cfg
= opaque
;
852 RISCVCPU
*cpu
= RISCV_CPU(obj
);
853 bool generic_cpu
= riscv_cpu_is_generic(obj
);
854 bool prev_val
, value
;
856 if (!visit_type_bool(v
, name
, &value
, errp
)) {
860 if (cpu_ext_is_deprecated(multi_ext_cfg
->name
)) {
861 g_autofree
char *lower
= cpu_ext_to_lower(multi_ext_cfg
->name
);
863 warn_report("CPU property '%s' is deprecated. Please use '%s' instead",
864 multi_ext_cfg
->name
, lower
);
867 g_hash_table_insert(multi_ext_user_opts
,
868 GUINT_TO_POINTER(multi_ext_cfg
->offset
),
871 prev_val
= isa_ext_is_enabled(cpu
, multi_ext_cfg
->offset
);
873 if (value
== prev_val
) {
877 if (value
&& !generic_cpu
) {
878 g_autofree
char *cpuname
= riscv_cpu_get_name(cpu
);
879 error_setg(errp
, "'%s' CPU does not allow enabling extensions",
884 isa_ext_update_enabled(cpu
, multi_ext_cfg
->offset
, value
);
887 static void cpu_get_multi_ext_cfg(Object
*obj
, Visitor
*v
, const char *name
,
888 void *opaque
, Error
**errp
)
890 const RISCVCPUMultiExtConfig
*multi_ext_cfg
= opaque
;
891 bool value
= isa_ext_is_enabled(RISCV_CPU(obj
), multi_ext_cfg
->offset
);
893 visit_type_bool(v
, name
, &value
, errp
);
896 static void cpu_add_multi_ext_prop(Object
*cpu_obj
,
897 const RISCVCPUMultiExtConfig
*multi_cfg
)
899 bool generic_cpu
= riscv_cpu_is_generic(cpu_obj
);
900 bool deprecated_ext
= cpu_ext_is_deprecated(multi_cfg
->name
);
902 object_property_add(cpu_obj
, multi_cfg
->name
, "bool",
903 cpu_get_multi_ext_cfg
,
904 cpu_set_multi_ext_cfg
,
905 NULL
, (void *)multi_cfg
);
907 if (!generic_cpu
|| deprecated_ext
) {
912 * Set def val directly instead of using
913 * object_property_set_bool() to save the set()
914 * callback hash for user inputs.
916 isa_ext_update_enabled(RISCV_CPU(cpu_obj
), multi_cfg
->offset
,
920 static void riscv_cpu_add_multiext_prop_array(Object
*obj
,
921 const RISCVCPUMultiExtConfig
*array
)
923 const RISCVCPUMultiExtConfig
*prop
;
927 for (prop
= array
; prop
&& prop
->name
; prop
++) {
928 cpu_add_multi_ext_prop(obj
, prop
);
933 * Add CPU properties with user-facing flags.
935 * This will overwrite existing env->misa_ext values with the
936 * defaults set via riscv_cpu_add_misa_properties().
938 static void riscv_cpu_add_user_properties(Object
*obj
)
940 #ifndef CONFIG_USER_ONLY
941 riscv_add_satp_mode_properties(obj
);
944 riscv_cpu_add_misa_properties(obj
);
946 riscv_cpu_add_multiext_prop_array(obj
, riscv_cpu_extensions
);
947 riscv_cpu_add_multiext_prop_array(obj
, riscv_cpu_vendor_exts
);
948 riscv_cpu_add_multiext_prop_array(obj
, riscv_cpu_experimental_exts
);
950 riscv_cpu_add_multiext_prop_array(obj
, riscv_cpu_deprecated_exts
);
952 for (Property
*prop
= riscv_cpu_options
; prop
&& prop
->name
; prop
++) {
953 qdev_property_add_static(DEVICE(obj
), prop
);
958 * The 'max' type CPU will have all possible ratified
959 * non-vendor extensions enabled.
961 static void riscv_init_max_cpu_extensions(Object
*obj
)
963 RISCVCPU
*cpu
= RISCV_CPU(obj
);
964 CPURISCVState
*env
= &cpu
->env
;
965 const RISCVCPUMultiExtConfig
*prop
;
967 /* Enable RVG, RVJ and RVV that are disabled by default */
968 riscv_cpu_set_misa(env
, env
->misa_mxl
, env
->misa_ext
| RVG
| RVJ
| RVV
);
970 for (prop
= riscv_cpu_extensions
; prop
&& prop
->name
; prop
++) {
971 isa_ext_update_enabled(cpu
, prop
->offset
, true);
974 /* set vector version */
975 env
->vext_ver
= VEXT_VERSION_1_00_0
;
977 /* Zfinx is not compatible with F. Disable it */
978 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zfinx
), false);
979 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zdinx
), false);
980 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zhinx
), false);
981 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zhinxmin
), false);
983 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zce
), false);
984 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zcmp
), false);
985 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zcmt
), false);
987 if (env
->misa_mxl
!= MXL_RV32
) {
988 isa_ext_update_enabled(cpu
, CPU_CFG_OFFSET(ext_zcf
), false);
992 static bool riscv_cpu_has_max_extensions(Object
*cpu_obj
)
994 return object_dynamic_cast(cpu_obj
, TYPE_RISCV_CPU_MAX
) != NULL
;
997 static void tcg_cpu_instance_init(CPUState
*cs
)
999 RISCVCPU
*cpu
= RISCV_CPU(cs
);
1000 Object
*obj
= OBJECT(cpu
);
1002 multi_ext_user_opts
= g_hash_table_new(NULL
, g_direct_equal
);
1003 riscv_cpu_add_user_properties(obj
);
1005 if (riscv_cpu_has_max_extensions(obj
)) {
1006 riscv_init_max_cpu_extensions(obj
);
1010 static void tcg_cpu_init_ops(AccelCPUClass
*accel_cpu
, CPUClass
*cc
)
1013 * All cpus use the same set of operations.
1015 cc
->tcg_ops
= &riscv_tcg_ops
;
1018 static void tcg_cpu_class_init(CPUClass
*cc
)
1020 cc
->init_accel_cpu
= tcg_cpu_init_ops
;
1023 static void tcg_cpu_accel_class_init(ObjectClass
*oc
, void *data
)
1025 AccelCPUClass
*acc
= ACCEL_CPU_CLASS(oc
);
1027 acc
->cpu_class_init
= tcg_cpu_class_init
;
1028 acc
->cpu_instance_init
= tcg_cpu_instance_init
;
1029 acc
->cpu_target_realize
= tcg_cpu_realize
;
1032 static const TypeInfo tcg_cpu_accel_type_info
= {
1033 .name
= ACCEL_CPU_NAME("tcg"),
1035 .parent
= TYPE_ACCEL_CPU
,
1036 .class_init
= tcg_cpu_accel_class_init
,
1040 static void tcg_cpu_accel_register_types(void)
1042 type_register_static(&tcg_cpu_accel_type_info
);
1044 type_init(tcg_cpu_accel_register_types
);