2 * RISC-V Control and Status Registers.
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "qemu/timer.h"
25 #include "time_helper.h"
26 #include "qemu/main-loop.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/cpu-timers.h"
29 #include "qemu/guest-random.h"
30 #include "qapi/error.h"
32 /* CSR function table public API */
/* Copy the csr_ops table entry for @csrno into @ops (index masked to table size). */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
    *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
}
/* Install @ops as the handler set for CSR @csrno (index masked to table size). */
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
{
    csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
}
/*
 * Predicate for FP CSRs: accessible when the debugger is attached, when
 * mstatus.FS enables the FP unit, or when the Zfinx extension is present.
 */
static RISCVException fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
        !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    return RISCV_EXCP_NONE;
}
/*
 * Predicate for vector CSRs: require RVV or one of the Zve32f/Zve64f
 * subsets, and (system mode) mstatus.VS enabled unless the debugger is
 * attached.
 */
static RISCVException vs(CPURISCVState *env, int csrno)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (env->misa_ext & RVV ||
        cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
#if !defined(CONFIG_USER_ONLY)
        if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
#endif
        return RISCV_EXCP_NONE;
    }
    return RISCV_EXCP_ILLEGAL_INST;
}
72 static RISCVException
ctr(CPURISCVState
*env
, int csrno
)
74 #if !defined(CONFIG_USER_ONLY)
75 CPUState
*cs
= env_cpu(env
);
76 RISCVCPU
*cpu
= RISCV_CPU(cs
);
78 target_ulong ctr_mask
;
79 int base_csrno
= CSR_CYCLE
;
80 bool rv32
= riscv_cpu_mxl(env
) == MXL_RV32
? true : false;
82 if (rv32
&& csrno
>= CSR_CYCLEH
) {
83 /* Offset for RV32 hpmcounternh counters */
86 ctr_index
= csrno
- base_csrno
;
87 ctr_mask
= BIT(ctr_index
);
89 if ((csrno
>= CSR_CYCLE
&& csrno
<= CSR_INSTRET
) ||
90 (csrno
>= CSR_CYCLEH
&& csrno
<= CSR_INSTRETH
)) {
91 goto skip_ext_pmu_check
;
94 if (!(cpu
->pmu_avail_ctrs
& ctr_mask
)) {
95 /* No counter is enabled in PMU or the counter is out of range */
96 return RISCV_EXCP_ILLEGAL_INST
;
101 if (((env
->priv
== PRV_S
) && (!get_field(env
->mcounteren
, ctr_mask
))) ||
102 ((env
->priv
== PRV_U
) && (!get_field(env
->scounteren
, ctr_mask
)))) {
103 return RISCV_EXCP_ILLEGAL_INST
;
106 if (riscv_cpu_virt_enabled(env
)) {
107 if (!get_field(env
->hcounteren
, ctr_mask
) &&
108 get_field(env
->mcounteren
, ctr_mask
)) {
109 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
113 return RISCV_EXCP_NONE
;
/* RV32-only variant of ctr(): the high-half counter CSRs exist only on RV32. */
static RISCVException ctr32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return ctr(env, csrno);
}
125 #if !defined(CONFIG_USER_ONLY)
/*
 * Predicate for the machine-mode hpm counter CSRs (mhpmcounter3..31 and
 * RV32 high halves): require the PMU to be configured with enough counters.
 */
static RISCVException mctr(CPURISCVState *env, int csrno)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);
    int ctr_index;
    int base_csrno = CSR_MHPMCOUNTER3;

    if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
        /* Offset for RV32 mhpmcounternh counters */
        base_csrno += 0x80;
    }
    ctr_index = csrno - base_csrno;
    if (!cpu->cfg.pmu_num || ctr_index >= cpu->cfg.pmu_num) {
        /* The PMU is not enabled or counter is out of range*/
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}
/* RV32-only variant of mctr() for the mhpmcounternh CSRs. */
static RISCVException mctr32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return mctr(env, csrno);
}
/* Predicate: CSRs of the Sscofpmf (count-overflow/filtering) extension. */
static RISCVException sscofpmf(CPURISCVState *env, int csrno)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!cpu->cfg.ext_sscofpmf) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}
/* Predicate: CSR is always accessible (no extension or mode restriction). */
static RISCVException any(CPURISCVState *env, int csrno)
{
    return RISCV_EXCP_NONE;
}
172 static RISCVException
any32(CPURISCVState
*env
, int csrno
)
174 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
175 return RISCV_EXCP_ILLEGAL_INST
;
178 return any(env
, csrno
);
/* Predicate: machine-level AIA CSRs require the Smaia extension. */
static int aia_any(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_smaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any(env, csrno);
}
/* Predicate: RV32-only machine-level AIA CSRs (Smaia + RV32). */
static int aia_any32(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_smaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any32(env, csrno);
}
/* Predicate: supervisor CSRs require the S extension. */
static RISCVException smode(CPURISCVState *env, int csrno)
{
    if (riscv_has_ext(env, RVS)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
/* Predicate: RV32-only supervisor CSRs (S extension + RV32). */
static int smode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smode(env, csrno);
}
/* Predicate: supervisor-level AIA CSRs require the Ssaia extension. */
static int aia_smode(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smode(env, csrno);
}
/* Predicate: RV32-only supervisor-level AIA CSRs (Ssaia + RV32 + S). */
static int aia_smode32(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smode32(env, csrno);
}
/* Predicate: hypervisor CSRs require the H extension. */
static RISCVException hmode(CPURISCVState *env, int csrno)
{
    if (riscv_has_ext(env, RVH)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
/* Predicate: RV32-only hypervisor CSRs (H extension + RV32). */
static RISCVException hmode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode(env, csrno);
}
263 static RISCVException
umode(CPURISCVState
*env
, int csrno
)
265 if (riscv_has_ext(env
, RVU
)) {
266 return RISCV_EXCP_NONE
;
269 return RISCV_EXCP_ILLEGAL_INST
;
/* Predicate: RV32-only user CSRs (U extension + RV32). */
static RISCVException umode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return umode(env, csrno);
}
/* Checks if PointerMasking registers could be accessed */
static RISCVException pointer_masking(CPURISCVState *env, int csrno)
{
    /* Check if j-ext is present */
    if (riscv_has_ext(env, RVJ)) {
        return RISCV_EXCP_NONE;
    }
    return RISCV_EXCP_ILLEGAL_INST;
}
/* Predicate: hypervisor-level AIA CSRs require the Ssaia extension. */
static int aia_hmode(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode(env, csrno);
}
/* Predicate: RV32-only hypervisor-level AIA CSRs (Ssaia + RV32 + H). */
static int aia_hmode32(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_ssaia) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode32(env, csrno);
}
/* Predicate: PMP configuration/address CSRs require the PMP feature. */
static RISCVException pmp(CPURISCVState *env, int csrno)
{
    if (riscv_feature(env, RISCV_FEATURE_PMP)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
/* Predicate: ePMP (mseccfg) CSRs require M-mode and the ePMP feature. */
static RISCVException epmp(CPURISCVState *env, int csrno)
{
    if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
/* Predicate: trigger/debug CSRs require the Debug feature. */
static RISCVException debug(CPURISCVState *env, int csrno)
{
    if (riscv_feature(env, RISCV_FEATURE_DEBUG)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
/*
 * Predicate for the Zkr "seed" CSR: gated by the Zkr extension and, in
 * system mode, by privilege level and the mseccfg SSEED/USEED access
 * control bits.
 */
static RISCVException seed(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_zkr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#if !defined(CONFIG_USER_ONLY)
    /*
     * With a CSR read-write instruction:
     * 1) The seed CSR is always available in machine mode as normal.
     * 2) Attempted access to seed from virtual modes VS and VU always raises
     *    an exception(virtual instruction exception only if mseccfg.sseed=1).
     * 3) Without the corresponding access control bit set to 1, any attempted
     *    access to seed from U, S or HS modes will raise an illegal instruction
     *    exception.
     */
    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    } else if (riscv_cpu_virt_enabled(env)) {
        if (env->mseccfg & MSECCFG_SSEED) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    } else {
        if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
            return RISCV_EXCP_NONE;
        } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
            return RISCV_EXCP_NONE;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }
#else
    return RISCV_EXCP_NONE;
#endif
}
/* User Floating-Point CSRs */
/* Read the accrued FP exception flags (fflags). */
static RISCVException read_fflags(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = riscv_cpu_get_fflags(env);
    return RISCV_EXCP_NONE;
}
/* Write fflags; marks the FP state dirty (mstatus.FS) when RVF is present. */
static RISCVException write_fflags(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
    return RISCV_EXCP_NONE;
}
/* Read the FP dynamic rounding mode (frm). */
static RISCVException read_frm(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->frm;
    return RISCV_EXCP_NONE;
}
/* Write frm; marks the FP state dirty (mstatus.FS) when RVF is present. */
static RISCVException write_frm(CPURISCVState *env, int csrno,
                                target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}
/* Read fcsr as the combination of the fflags and frm fields. */
static RISCVException read_fcsr(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
        | (env->frm << FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}
/* Write fcsr: split into frm and fflags; dirties mstatus.FS if RVF present. */
static RISCVException write_fcsr(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
    riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
    return RISCV_EXCP_NONE;
}
/* Read vtype: vill is placed in the sign bit for the current XLEN. */
static RISCVException read_vtype(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t vill;

    switch (env->xl) {
    case MXL_RV32:
        vill = (uint32_t)env->vill << 31;
        break;
    case MXL_RV64:
        vill = (uint64_t)env->vill << 63;
        break;
    default:
        g_assert_not_reached();
    }
    *val = (target_ulong)vill | env->vtype;
    return RISCV_EXCP_NONE;
}
/* Read the current vector length (vl). */
static RISCVException read_vl(CPURISCVState *env, int csrno,
                              target_ulong *val)
{
    *val = env->vl;
    return RISCV_EXCP_NONE;
}
/* Read vlenb: the vector register length in bytes (VLEN / 8). */
static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env_archcpu(env)->cfg.vlen >> 3;
    return RISCV_EXCP_NONE;
}
/* Read the vector fixed-point rounding mode (vxrm). */
static RISCVException read_vxrm(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->vxrm;
    return RISCV_EXCP_NONE;
}
/* Write vxrm; marks the vector state dirty (mstatus.VS) in system mode. */
static RISCVException write_vxrm(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = val;
    return RISCV_EXCP_NONE;
}
/* Read the vector fixed-point saturation flag (vxsat). */
static RISCVException read_vxsat(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vxsat;
    return RISCV_EXCP_NONE;
}
/* Write vxsat; marks the vector state dirty (mstatus.VS) in system mode. */
static RISCVException write_vxsat(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    /* NOTE(review): assignment line was garbled in extraction — confirm mask */
    env->vxsat = val;
    return RISCV_EXCP_NONE;
}
/* Read the vector start index (vstart). */
static RISCVException read_vstart(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstart;
    return RISCV_EXCP_NONE;
}
/* Write vstart, keeping only the writable low lg2(VLEN) bits. */
static RISCVException write_vstart(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    /*
     * The vstart CSR is defined to have only enough writable bits
     * to hold the largest element index, i.e. lg2(VLEN) bits.
     */
    env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
    return RISCV_EXCP_NONE;
}
/* Read vcsr: the vxrm and vxsat fields packed together. */
static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
    return RISCV_EXCP_NONE;
}
/* Write vcsr: unpack vxrm and vxsat; dirties mstatus.VS in system mode. */
static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
    env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
    return RISCV_EXCP_NONE;
}
/* User Timers and Counters */
/*
 * Return the current tick count (icount when enabled, host ticks otherwise);
 * @shift selects the upper 32 bits for RV32 high-half counter reads.
 * NOTE(review): interior lines were garbled in extraction — confirm the
 * icount branch against upstream.
 */
static target_ulong get_ticks(bool shift)
{
    int64_t val;
    target_ulong result;

#if !defined(CONFIG_USER_ONLY)
    if (icount_enabled()) {
        val = icount_get();
    } else {
        val = cpu_get_host_ticks();
    }
#else
    val = cpu_get_host_ticks();
#endif

    if (shift) {
        result = val >> 32;
    } else {
        result = val;
    }

    return result;
}
568 #if defined(CONFIG_USER_ONLY)
/* User-only build: time CSR reads the host tick counter directly. */
static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = cpu_get_host_ticks();
    return RISCV_EXCP_NONE;
}
/* User-only build: timeh CSR reads the upper half of the host tick counter. */
static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = cpu_get_host_ticks() >> 32;
    return RISCV_EXCP_NONE;
}
/* User-only build: all hpm counters read the low half of the tick count. */
static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = get_ticks(false);
    return RISCV_EXCP_NONE;
}
/* User-only build: high-half hpm counters read the upper tick bits. */
static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = get_ticks(true);
    return RISCV_EXCP_NONE;
}
595 #else /* CONFIG_USER_ONLY */
/* Read an mhpmevent CSR (index derived from the CSR number). */
static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;

    *val = env->mhpmevent_val[evt_index];

    return RISCV_EXCP_NONE;
}
/*
 * Write an mhpmevent CSR and refresh the PMU event map.  On RV32 the full
 * 64-bit event value is the concatenation with the mhpmeventh counterpart.
 */
static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;
    uint64_t mhpmevt_val = val;

    env->mhpmevent_val[evt_index] = val;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpmevt_val = mhpmevt_val |
                      ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
    }
    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
/* Read an mhpmeventh CSR (RV32 upper half of the event selector). */
static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;

    *val = env->mhpmeventh_val[evt_index];

    return RISCV_EXCP_NONE;
}
/*
 * Write an mhpmeventh CSR (RV32): combine with the stored low half and
 * refresh the PMU event map with the full 64-bit value.
 */
static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;
    uint64_t mhpmevth_val = val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];

    mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
    env->mhpmeventh_val[evt_index] = val;

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
/*
 * Write an mhpmcounter CSR.  For cycle/instret-monitoring counters the
 * current tick snapshot is recorded (and a PMU timer armed for programmable
 * counters); other counters continue incrementing from the written value.
 * NOTE(review): the conditional around riscv_pmu_setup_timer was garbled in
 * extraction — confirm the ctr_idx gating against upstream.
 */
static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLE;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = val;

    counter->mhpmcounter_val = val;
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        counter->mhpmcounter_prev = get_ticks(false);
        if (ctr_idx > 2) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                mhpmctr_val = mhpmctr_val |
                              ((uint64_t)counter->mhpmcounterh_val << 32);
            }
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Other counters can keep incrementing from the given value */
        counter->mhpmcounter_prev = val;
    }

    return RISCV_EXCP_NONE;
}
/*
 * Write an mhpmcounterh CSR (RV32 upper half); mirrors write_mhpmcounter
 * using the combined 64-bit value for the PMU timer.
 * NOTE(review): the conditional around riscv_pmu_setup_timer was garbled in
 * extraction — confirm the ctr_idx gating against upstream.
 */
static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLEH;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = counter->mhpmcounter_val;
    uint64_t mhpmctrh_val = val;

    counter->mhpmcounterh_val = val;
    mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
        riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
        counter->mhpmcounterh_prev = get_ticks(true);
        if (ctr_idx > 2) {
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        counter->mhpmcounterh_prev = val;
    }

    return RISCV_EXCP_NONE;
}
692 static RISCVException
riscv_pmu_read_ctr(CPURISCVState
*env
, target_ulong
*val
,
693 bool upper_half
, uint32_t ctr_idx
)
695 PMUCTRState counter
= env
->pmu_ctrs
[ctr_idx
];
696 target_ulong ctr_prev
= upper_half
? counter
.mhpmcounterh_prev
:
697 counter
.mhpmcounter_prev
;
698 target_ulong ctr_val
= upper_half
? counter
.mhpmcounterh_val
:
699 counter
.mhpmcounter_val
;
701 if (get_field(env
->mcountinhibit
, BIT(ctr_idx
))) {
703 * Counter should not increment if inhibit bit is set. We can't really
704 * stop the icount counting. Just return the counter value written by
705 * the supervisor to indicate that counter was not incremented.
707 if (!counter
.started
) {
709 return RISCV_EXCP_NONE
;
711 /* Mark that the counter has been stopped */
712 counter
.started
= false;
717 * The kernel computes the perf delta by subtracting the current value from
718 * the value it initialized previously (ctr_val).
720 if (riscv_pmu_ctr_monitor_cycles(env
, ctr_idx
) ||
721 riscv_pmu_ctr_monitor_instructions(env
, ctr_idx
)) {
722 *val
= get_ticks(upper_half
) - ctr_prev
+ ctr_val
;
727 return RISCV_EXCP_NONE
;
/* Read the low half of a (m)hpm counter, mapping the CSR number to an index. */
static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
{
    uint16_t ctr_index;

    if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
        ctr_index = csrno - CSR_MCYCLE;
    } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
        ctr_index = csrno - CSR_CYCLE;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, false, ctr_index);
}
/* Read the high half of a (m)hpm counter (RV32), mapping CSR to index. */
static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
{
    uint16_t ctr_index;

    if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
        ctr_index = csrno - CSR_MCYCLEH;
    } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
        ctr_index = csrno - CSR_CYCLEH;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, true, ctr_index);
}
/*
 * Read scountovf: a bitmask of counters whose event selector has the
 * overflow (OF) bit set and which are exposed via mcounteren.
 */
static int read_scountovf(CPURISCVState *env, int csrno, target_ulong *val)
{
    int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
    int i;
    *val = 0;
    target_ulong *mhpm_evt_val;
    uint64_t of_bit_mask;

    /* On RV32 the OF bit lives in the mhpmeventh half. */
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpm_evt_val = env->mhpmeventh_val;
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpm_evt_val = env->mhpmevent_val;
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
        if ((get_field(env->mcounteren, BIT(i))) &&
            (mhpm_evt_val[i] & of_bit_mask)) {
            *val |= BIT(i);
        }
    }

    return RISCV_EXCP_NONE;
}
/* Read the time CSR via the board's rdtime callback, plus htimedelta in VS. */
static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;

    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
    return RISCV_EXCP_NONE;
}
/* Read timeh (RV32): upper 32 bits of time plus htimedelta in VS mode. */
static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;

    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
    return RISCV_EXCP_NONE;
}
812 static RISCVException
sstc(CPURISCVState
*env
, int csrno
)
814 CPUState
*cs
= env_cpu(env
);
815 RISCVCPU
*cpu
= RISCV_CPU(cs
);
816 bool hmode_check
= false;
818 if (!cpu
->cfg
.ext_sstc
|| !env
->rdtime_fn
) {
819 return RISCV_EXCP_ILLEGAL_INST
;
822 if (env
->priv
== PRV_M
) {
823 return RISCV_EXCP_NONE
;
827 * No need of separate function for rv32 as menvcfg stores both menvcfg
830 if (!(get_field(env
->mcounteren
, COUNTEREN_TM
) &&
831 get_field(env
->menvcfg
, MENVCFG_STCE
))) {
832 return RISCV_EXCP_ILLEGAL_INST
;
835 if (riscv_cpu_virt_enabled(env
)) {
836 if (!(get_field(env
->hcounteren
, COUNTEREN_TM
) &
837 get_field(env
->henvcfg
, HENVCFG_STCE
))) {
838 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
842 if ((csrno
== CSR_VSTIMECMP
) || (csrno
== CSR_VSTIMECMPH
)) {
846 return hmode_check
? hmode(env
, csrno
) : smode(env
, csrno
);
/* RV32-only variant of sstc() for the stimecmph/vstimecmph CSRs. */
static RISCVException sstc_32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return sstc(env, csrno);
}
/* Read vstimecmp (low half on RV32, full value on RV64). */
static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->vstimecmp;

    return RISCV_EXCP_NONE;
}
/* Read vstimecmph: the upper 32 bits of vstimecmp (RV32 only). */
static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->vstimecmp >> 32;

    return RISCV_EXCP_NONE;
}
/*
 * Write vstimecmp (low half on RV32) and rearm the VS timer with the
 * htimedelta offset applied.
 */
static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
    } else {
        env->vstimecmp = val;
    }

    riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
                              env->htimedelta, MIP_VSTIP);

    return RISCV_EXCP_NONE;
}
/* Write vstimecmph (RV32 upper half) and rearm the VS timer. */
static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);

    env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
    riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
                              env->htimedelta, MIP_VSTIP);

    return RISCV_EXCP_NONE;
}
/* Read stimecmp; in virtualized mode this transparently reads vstimecmp. */
static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    if (riscv_cpu_virt_enabled(env)) {
        *val = env->vstimecmp;
    } else {
        *val = env->stimecmp;
    }

    return RISCV_EXCP_NONE;
}
/* Read stimecmph (RV32); virtualized mode reads vstimecmp's upper half. */
static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    if (riscv_cpu_virt_enabled(env)) {
        *val = env->vstimecmp >> 32;
    } else {
        *val = env->stimecmp >> 32;
    }

    return RISCV_EXCP_NONE;
}
/*
 * Write stimecmp and rearm the S timer; virtualized mode redirects to
 * write_vstimecmp.
 */
static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (riscv_cpu_virt_enabled(env)) {
        return write_vstimecmp(env, csrno, val);
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
    } else {
        env->stimecmp = val;
    }

    riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP);

    return RISCV_EXCP_NONE;
}
/*
 * Write stimecmph (RV32 upper half) and rearm the S timer; virtualized
 * mode redirects to write_vstimecmph.
 */
static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (riscv_cpu_virt_enabled(env)) {
        return write_vstimecmph(env, csrno, val);
    }

    env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
    riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP);

    return RISCV_EXCP_NONE;
}
962 /* Machine constants */
964 #define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
965 #define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP | \
967 #define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
968 #define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
970 #define VSTOPI_NUM_SRCS 5
972 static const uint64_t delegable_ints
= S_MODE_INTERRUPTS
|
974 static const uint64_t vs_delegable_ints
= VS_MODE_INTERRUPTS
;
975 static const uint64_t all_ints
= M_MODE_INTERRUPTS
| S_MODE_INTERRUPTS
|
977 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
978 (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
979 (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
980 (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
981 (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
982 (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
983 (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
984 (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
985 (1ULL << (RISCV_EXCP_U_ECALL)) | \
986 (1ULL << (RISCV_EXCP_S_ECALL)) | \
987 (1ULL << (RISCV_EXCP_VS_ECALL)) | \
988 (1ULL << (RISCV_EXCP_M_ECALL)) | \
989 (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
990 (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
991 (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
992 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
993 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
994 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
995 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
996 static const target_ulong vs_delegable_excps
= DELEGABLE_EXCPS
&
997 ~((1ULL << (RISCV_EXCP_S_ECALL
)) |
998 (1ULL << (RISCV_EXCP_VS_ECALL
)) |
999 (1ULL << (RISCV_EXCP_M_ECALL
)) |
1000 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT
)) |
1001 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT
)) |
1002 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT
)) |
1003 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT
)));
1004 static const target_ulong sstatus_v1_10_mask
= SSTATUS_SIE
| SSTATUS_SPIE
|
1005 SSTATUS_UIE
| SSTATUS_UPIE
| SSTATUS_SPP
| SSTATUS_FS
| SSTATUS_XS
|
1006 SSTATUS_SUM
| SSTATUS_MXR
| SSTATUS_VS
;
1007 static const target_ulong sip_writable_mask
= SIP_SSIP
| MIP_USIP
| MIP_UEIP
|
1009 static const target_ulong hip_writable_mask
= MIP_VSSIP
;
1010 static const target_ulong hvip_writable_mask
= MIP_VSSIP
| MIP_VSTIP
| MIP_VSEIP
;
1011 static const target_ulong vsip_writable_mask
= MIP_VSSIP
;
1013 static const char valid_vm_1_10_32
[16] = {
1014 [VM_1_10_MBARE
] = 1,
1018 static const char valid_vm_1_10_64
[16] = {
1019 [VM_1_10_MBARE
] = 1,
1025 /* Machine Information Registers */
/* Generic read handler for CSRs that always read as zero. */
static RISCVException read_zero(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = 0;
    return RISCV_EXCP_NONE;
}
/* Generic write handler for CSRs whose writes are silently discarded. */
static RISCVException write_ignore(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    return RISCV_EXCP_NONE;
}
/* Read mvendorid from the CPU configuration. */
static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    *val = cpu->cfg.mvendorid;
    return RISCV_EXCP_NONE;
}
/* Read marchid from the CPU configuration. */
static RISCVException read_marchid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    *val = cpu->cfg.marchid;
    return RISCV_EXCP_NONE;
}
/* Read mimpid from the CPU configuration. */
static RISCVException read_mimpid(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    *val = cpu->cfg.mimpid;
    return RISCV_EXCP_NONE;
}
/* Read mhartid: this hart's ID. */
static RISCVException read_mhartid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mhartid;
    return RISCV_EXCP_NONE;
}
1076 /* Machine Trap Setup */
1078 /* We do not store SD explicitly, only compute it on demand. */
/* We do not store SD explicitly, only compute it on demand. */
static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
{
    /* SD is set when any of FS/VS/XS indicates dirty state. */
    if ((status & MSTATUS_FS) == MSTATUS_FS ||
        (status & MSTATUS_VS) == MSTATUS_VS ||
        (status & MSTATUS_XS) == MSTATUS_XS) {
        switch (xl) {
        case MXL_RV32:
            return status | MSTATUS32_SD;
        case MXL_RV64:
            return status | MSTATUS64_SD;
        case MXL_RV128:
            return MSTATUSH128_SD;
        default:
            g_assert_not_reached();
        }
    }
    return status;
}
/* Read mstatus with the derived SD bit folded in. */
static RISCVException read_mstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
    return RISCV_EXCP_NONE;
}
/* Return non-zero if @vm is a supported satp mode for the current XLEN. */
static int validate_vm(CPURISCVState *env, target_ulong vm)
{
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        return valid_vm_1_10_32[vm & 0xf];
    } else {
        return valid_vm_1_10_64[vm & 0xf];
    }
}
/*
 * Write mstatus: mask to writable bits, flush the TLB when fields that
 * affect address translation change, keep SXL read-only, and recompute
 * the effective XLEN.
 */
static RISCVException write_mstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus = env->mstatus;
    uint64_t mask = 0;
    RISCVMXL xl = riscv_cpu_mxl(env);

    /* flush tlb on mstatus fields that affect VM */
    if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV |
            MSTATUS_MPRV | MSTATUS_SUM)) {
        tlb_flush(env_cpu(env));
    }
    mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
        MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
        MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
        MSTATUS_TW | MSTATUS_VS;

    if (riscv_has_ext(env, RVF)) {
        mask |= MSTATUS_FS;
    }

    if (xl != MXL_RV32 || env->debugger) {
        /*
         * RV32: MPV and GVA are not in mstatus. The current plan is to
         * add them to mstatush. For now, we just don't support it.
         */
        mask |= MSTATUS_MPV | MSTATUS_GVA;
        if ((val & MSTATUS64_UXL) != 0) {
            mask |= MSTATUS64_UXL;
        }
    }

    mstatus = (mstatus & ~mask) | (val & mask);

    if (xl > MXL_RV32) {
        /* SXL field is for now read only */
        mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
    }
    env->mstatus = mstatus;
    env->xl = cpu_recompute_xl(env);

    return RISCV_EXCP_NONE;
}
/* Read mstatush: the upper 32 bits of mstatus (RV32). */
static RISCVException read_mstatush(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mstatus >> 32;
    return RISCV_EXCP_NONE;
}
/* Write mstatush (RV32): only MPV and GVA are writable; MPV change flushes TLB. */
static RISCVException write_mstatush(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t valh = (uint64_t)val << 32;
    uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;

    if ((valh ^ env->mstatus) & (MSTATUS_MPV)) {
        tlb_flush(env_cpu(env));
    }

    env->mstatus = (env->mstatus & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}
/* RV128: read mstatus as a 128-bit value with SD derived in the high half. */
static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
                                        Int128 *val)
{
    *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus));
    return RISCV_EXCP_NONE;
}
/* RV128: read misa with MXL=RV128 encoded in the top bits. */
static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
                                     Int128 *val)
{
    *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
    return RISCV_EXCP_NONE;
}
/* Read misa: the MXL field in the top bits plus the extension bitmap. */
static RISCVException read_misa(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    target_ulong misa;

    switch (env->misa_mxl) {
    case MXL_RV32:
        misa = (target_ulong)MXL_RV32 << 30;
        break;
#ifdef TARGET_RISCV64
    case MXL_RV64:
        misa = (target_ulong)MXL_RV64 << 62;
        break;
#endif
    default:
        g_assert_not_reached();
    }

    *val = misa | env->misa_ext;
    return RISCV_EXCP_NONE;
}
/*
 * Write misa: validate and mask the requested extension set, dropping
 * unsupported combinations, then flush translated code if anything changed.
 */
static RISCVException write_misa(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    if (!riscv_feature(env, RISCV_FEATURE_MISA)) {
        /* drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* 'I' or 'E' must be present */
    if (!(val & (RVI | RVE))) {
        /* It is not, drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* 'E' excludes all other extensions */
    if (val & RVE) {
        /* when we support 'E' we can do "val = RVE;" however
         * for now we just drop writes if 'E' is present.
         */
        return RISCV_EXCP_NONE;
    }

    /*
     * misa.MXL writes are not supported by QEMU.
     * Drop writes to those bits.
     */

    /* Mask extensions that are not supported by this hart */
    val &= env->misa_ext_mask;

    /* Mask extensions that are not supported by QEMU */
    val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);

    /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
    if ((val & RVD) && !(val & RVF)) {
        val &= ~RVD;
    }

    /* Suppress 'C' if next instruction is not aligned
     * TODO: this should check next_pc
     */
    if ((val & RVC) && (GETPC() & ~3) != 0) {
        val &= ~RVC;
    }

    /* If nothing changed, do nothing. */
    if (val == env->misa_ext) {
        return RISCV_EXCP_NONE;
    }

    if (!(val & RVF)) {
        env->mstatus &= ~MSTATUS_FS;
    }

    /* flush translation cache */
    tb_flush(env_cpu(env));
    env->misa_ext = val;
    env->xl = riscv_cpu_mxl(env);
    return RISCV_EXCP_NONE;
}
/* Read the machine exception delegation register (medeleg). */
static RISCVException read_medeleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->medeleg;
    return RISCV_EXCP_NONE;
}
/* Write medeleg, restricted to the delegable exception set. */
static RISCVException write_medeleg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
    return RISCV_EXCP_NONE;
}
/*
 * 64-bit read-modify-write of mideleg, restricted to delegable interrupts;
 * with the H extension, the HS-level interrupts are always delegated.
 */
static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
                                    uint64_t *ret_val,
                                    uint64_t new_val, uint64_t wr_mask)
{
    uint64_t mask = wr_mask & delegable_ints;

    if (ret_val) {
        *ret_val = env->mideleg;
    }

    env->mideleg = (env->mideleg & ~mask) | (new_val & mask);

    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    return RISCV_EXCP_NONE;
}
/* XLEN-wide rmw of mideleg: forwards to the 64-bit helper (low half). */
static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
                                  target_ulong *ret_val,
                                  target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}
/* RV32 rmw of midelegh: operates on the upper 32 bits of mideleg. */
static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
                                   target_ulong *ret_val,
                                   target_ulong new_val,
                                   target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mideleg64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}
/*
 * 64-bit read-modify-write of mie, restricted to implemented interrupts;
 * SGEIP is writable only when the H extension is present.
 */
static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    uint64_t mask = wr_mask & all_ints;

    if (ret_val) {
        *ret_val = env->mie;
    }

    env->mie = (env->mie & ~mask) | (new_val & mask);

    if (!riscv_has_ext(env, RVH)) {
        env->mie &= ~((uint64_t)MIP_SGEIP);
    }

    return RISCV_EXCP_NONE;
}
/* XLEN-wide rmw of mie: forwards to the 64-bit helper (low half). */
static RISCVException rmw_mie(CPURISCVState *env, int csrno,
                              target_ulong *ret_val,
                              target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}
/* RV32 rmw of mieh: operates on the upper 32 bits of mie. */
static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mie64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}
/*
 * Read mtopi: top pending machine interrupt (IID and priority).
 * NOTE(review): interior lines were garbled in extraction — confirm the
 * iprio defaulting logic against upstream.
 */
static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
{
    int irq;
    uint8_t iprio;

    irq = riscv_cpu_mirq_pending(env);
    if (irq <= 0 || irq > 63) {
        *val = 0;
    } else {
        iprio = env->miprio[irq];
        if (!iprio) {
            /* Unconfigured priority: report max when below default order. */
            if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
                iprio = IPRIO_MMAXIPRIO;
            }
        }
        *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
        *val |= iprio;
    }

    return RISCV_EXCP_NONE;
}
/*
 * Translate an AIA CSR number to its VS-mode counterpart when the hart
 * is currently virtualized; otherwise the number passes through as-is.
 * NOTE(review): the interior switch/case lines of this function were
 * lost in extraction — only the early-out and the CSR_VSISELECT arm are
 * visible here; confirm the remaining arms against the full file.
 */
1414 static int aia_xlate_vs_csrno(CPURISCVState
*env
, int csrno
)
/* Not virtualized: no translation needed. */
1416 if (!riscv_cpu_virt_enabled(env
)) {
1422 return CSR_VSISELECT
;
1432 static int rmw_xiselect(CPURISCVState
*env
, int csrno
, target_ulong
*val
,
1433 target_ulong new_val
, target_ulong wr_mask
)
1435 target_ulong
*iselect
;
1437 /* Translate CSR number for VS-mode */
1438 csrno
= aia_xlate_vs_csrno(env
, csrno
);
1440 /* Find the iselect CSR based on CSR number */
1443 iselect
= &env
->miselect
;
1446 iselect
= &env
->siselect
;
1449 iselect
= &env
->vsiselect
;
1452 return RISCV_EXCP_ILLEGAL_INST
;
1459 wr_mask
&= ISELECT_MASK
;
1461 *iselect
= (*iselect
& ~wr_mask
) | (new_val
& wr_mask
);
1464 return RISCV_EXCP_NONE
;
1467 static int rmw_iprio(target_ulong xlen
,
1468 target_ulong iselect
, uint8_t *iprio
,
1469 target_ulong
*val
, target_ulong new_val
,
1470 target_ulong wr_mask
, int ext_irq_no
)
1473 target_ulong old_val
;
1475 if (iselect
< ISELECT_IPRIO0
|| ISELECT_IPRIO15
< iselect
) {
1478 if (xlen
!= 32 && iselect
& 0x1) {
1482 nirqs
= 4 * (xlen
/ 32);
1483 firq
= ((iselect
- ISELECT_IPRIO0
) / (xlen
/ 32)) * (nirqs
);
1486 for (i
= 0; i
< nirqs
; i
++) {
1487 old_val
|= ((target_ulong
)iprio
[firq
+ i
]) << (IPRIO_IRQ_BITS
* i
);
1495 new_val
= (old_val
& ~wr_mask
) | (new_val
& wr_mask
);
1496 for (i
= 0; i
< nirqs
; i
++) {
1498 * M-level and S-level external IRQ priority always read-only
1499 * zero. This means default priority order is always preferred
1500 * for M-level and S-level external IRQs.
1502 if ((firq
+ i
) == ext_irq_no
) {
1505 iprio
[firq
+ i
] = (new_val
>> (IPRIO_IRQ_BITS
* i
)) & 0xff;
1512 static int rmw_xireg(CPURISCVState
*env
, int csrno
, target_ulong
*val
,
1513 target_ulong new_val
, target_ulong wr_mask
)
1518 target_ulong priv
, isel
, vgein
;
1520 /* Translate CSR number for VS-mode */
1521 csrno
= aia_xlate_vs_csrno(env
, csrno
);
1523 /* Decode register details from CSR number */
1527 iprio
= env
->miprio
;
1528 isel
= env
->miselect
;
1532 iprio
= env
->siprio
;
1533 isel
= env
->siselect
;
1537 iprio
= env
->hviprio
;
1538 isel
= env
->vsiselect
;
1546 /* Find the selected guest interrupt file */
1547 vgein
= (virt
) ? get_field(env
->hstatus
, HSTATUS_VGEIN
) : 0;
1549 if (ISELECT_IPRIO0
<= isel
&& isel
<= ISELECT_IPRIO15
) {
1550 /* Local interrupt priority registers not available for VS-mode */
1552 ret
= rmw_iprio(riscv_cpu_mxl_bits(env
),
1553 isel
, iprio
, val
, new_val
, wr_mask
,
1554 (priv
== PRV_M
) ? IRQ_M_EXT
: IRQ_S_EXT
);
1556 } else if (ISELECT_IMSIC_FIRST
<= isel
&& isel
<= ISELECT_IMSIC_LAST
) {
1557 /* IMSIC registers only available when machine implements it. */
1558 if (env
->aia_ireg_rmw_fn
[priv
]) {
1559 /* Selected guest interrupt file should not be zero */
1560 if (virt
&& (!vgein
|| env
->geilen
< vgein
)) {
1563 /* Call machine specific IMSIC register emulation */
1564 ret
= env
->aia_ireg_rmw_fn
[priv
](env
->aia_ireg_rmw_fn_arg
[priv
],
1565 AIA_MAKE_IREG(isel
, priv
, virt
, vgein
,
1566 riscv_cpu_mxl_bits(env
)),
1567 val
, new_val
, wr_mask
);
1573 return (riscv_cpu_virt_enabled(env
) && virt
) ?
1574 RISCV_EXCP_VIRT_INSTRUCTION_FAULT
: RISCV_EXCP_ILLEGAL_INST
;
1576 return RISCV_EXCP_NONE
;
1579 static int rmw_xtopei(CPURISCVState
*env
, int csrno
, target_ulong
*val
,
1580 target_ulong new_val
, target_ulong wr_mask
)
1584 target_ulong priv
, vgein
;
1586 /* Translate CSR number for VS-mode */
1587 csrno
= aia_xlate_vs_csrno(env
, csrno
);
1589 /* Decode register details from CSR number */
1606 /* IMSIC CSRs only available when machine implements IMSIC. */
1607 if (!env
->aia_ireg_rmw_fn
[priv
]) {
1611 /* Find the selected guest interrupt file */
1612 vgein
= (virt
) ? get_field(env
->hstatus
, HSTATUS_VGEIN
) : 0;
1614 /* Selected guest interrupt file should be valid */
1615 if (virt
&& (!vgein
|| env
->geilen
< vgein
)) {
1619 /* Call machine specific IMSIC register emulation for TOPEI */
1620 ret
= env
->aia_ireg_rmw_fn
[priv
](env
->aia_ireg_rmw_fn_arg
[priv
],
1621 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI
, priv
, virt
, vgein
,
1622 riscv_cpu_mxl_bits(env
)),
1623 val
, new_val
, wr_mask
);
1627 return (riscv_cpu_virt_enabled(env
) && virt
) ?
1628 RISCV_EXCP_VIRT_INSTRUCTION_FAULT
: RISCV_EXCP_ILLEGAL_INST
;
1630 return RISCV_EXCP_NONE
;
1633 static RISCVException
read_mtvec(CPURISCVState
*env
, int csrno
,
1637 return RISCV_EXCP_NONE
;
1640 static RISCVException
write_mtvec(CPURISCVState
*env
, int csrno
,
1643 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
1644 if ((val
& 3) < 2) {
1647 qemu_log_mask(LOG_UNIMP
, "CSR_MTVEC: reserved mode not supported\n");
1649 return RISCV_EXCP_NONE
;
1652 static RISCVException
read_mcountinhibit(CPURISCVState
*env
, int csrno
,
1655 *val
= env
->mcountinhibit
;
1656 return RISCV_EXCP_NONE
;
1659 static RISCVException
write_mcountinhibit(CPURISCVState
*env
, int csrno
,
1663 PMUCTRState
*counter
;
1665 env
->mcountinhibit
= val
;
1667 /* Check if any other counter is also monitoring cycles/instructions */
1668 for (cidx
= 0; cidx
< RV_MAX_MHPMCOUNTERS
; cidx
++) {
1669 if (!get_field(env
->mcountinhibit
, BIT(cidx
))) {
1670 counter
= &env
->pmu_ctrs
[cidx
];
1671 counter
->started
= true;
1675 return RISCV_EXCP_NONE
;
1678 static RISCVException
read_mcounteren(CPURISCVState
*env
, int csrno
,
1681 *val
= env
->mcounteren
;
1682 return RISCV_EXCP_NONE
;
1685 static RISCVException
write_mcounteren(CPURISCVState
*env
, int csrno
,
1688 env
->mcounteren
= val
;
1689 return RISCV_EXCP_NONE
;
1692 /* Machine Trap Handling */
1693 static RISCVException
read_mscratch_i128(CPURISCVState
*env
, int csrno
,
1696 *val
= int128_make128(env
->mscratch
, env
->mscratchh
);
1697 return RISCV_EXCP_NONE
;
1700 static RISCVException
write_mscratch_i128(CPURISCVState
*env
, int csrno
,
1703 env
->mscratch
= int128_getlo(val
);
1704 env
->mscratchh
= int128_gethi(val
);
1705 return RISCV_EXCP_NONE
;
1708 static RISCVException
read_mscratch(CPURISCVState
*env
, int csrno
,
1711 *val
= env
->mscratch
;
1712 return RISCV_EXCP_NONE
;
1715 static RISCVException
write_mscratch(CPURISCVState
*env
, int csrno
,
1718 env
->mscratch
= val
;
1719 return RISCV_EXCP_NONE
;
1722 static RISCVException
read_mepc(CPURISCVState
*env
, int csrno
,
1726 return RISCV_EXCP_NONE
;
1729 static RISCVException
write_mepc(CPURISCVState
*env
, int csrno
,
1733 return RISCV_EXCP_NONE
;
1736 static RISCVException
read_mcause(CPURISCVState
*env
, int csrno
,
1740 return RISCV_EXCP_NONE
;
1743 static RISCVException
write_mcause(CPURISCVState
*env
, int csrno
,
1747 return RISCV_EXCP_NONE
;
1750 static RISCVException
read_mtval(CPURISCVState
*env
, int csrno
,
1754 return RISCV_EXCP_NONE
;
1757 static RISCVException
write_mtval(CPURISCVState
*env
, int csrno
,
1761 return RISCV_EXCP_NONE
;
1764 /* Execution environment configuration setup */
1765 static RISCVException
read_menvcfg(CPURISCVState
*env
, int csrno
,
1768 *val
= env
->menvcfg
;
1769 return RISCV_EXCP_NONE
;
1772 static RISCVException
write_menvcfg(CPURISCVState
*env
, int csrno
,
1775 uint64_t mask
= MENVCFG_FIOM
| MENVCFG_CBIE
| MENVCFG_CBCFE
| MENVCFG_CBZE
;
1777 if (riscv_cpu_mxl(env
) == MXL_RV64
) {
1778 mask
|= MENVCFG_PBMTE
| MENVCFG_STCE
;
1780 env
->menvcfg
= (env
->menvcfg
& ~mask
) | (val
& mask
);
1782 return RISCV_EXCP_NONE
;
1785 static RISCVException
read_menvcfgh(CPURISCVState
*env
, int csrno
,
1788 *val
= env
->menvcfg
>> 32;
1789 return RISCV_EXCP_NONE
;
1792 static RISCVException
write_menvcfgh(CPURISCVState
*env
, int csrno
,
1795 uint64_t mask
= MENVCFG_PBMTE
| MENVCFG_STCE
;
1796 uint64_t valh
= (uint64_t)val
<< 32;
1798 env
->menvcfg
= (env
->menvcfg
& ~mask
) | (valh
& mask
);
1800 return RISCV_EXCP_NONE
;
1803 static RISCVException
read_senvcfg(CPURISCVState
*env
, int csrno
,
1806 *val
= env
->senvcfg
;
1807 return RISCV_EXCP_NONE
;
1810 static RISCVException
write_senvcfg(CPURISCVState
*env
, int csrno
,
1813 uint64_t mask
= SENVCFG_FIOM
| SENVCFG_CBIE
| SENVCFG_CBCFE
| SENVCFG_CBZE
;
1815 env
->senvcfg
= (env
->senvcfg
& ~mask
) | (val
& mask
);
1817 return RISCV_EXCP_NONE
;
1820 static RISCVException
read_henvcfg(CPURISCVState
*env
, int csrno
,
1823 *val
= env
->henvcfg
;
1824 return RISCV_EXCP_NONE
;
1827 static RISCVException
write_henvcfg(CPURISCVState
*env
, int csrno
,
1830 uint64_t mask
= HENVCFG_FIOM
| HENVCFG_CBIE
| HENVCFG_CBCFE
| HENVCFG_CBZE
;
1832 if (riscv_cpu_mxl(env
) == MXL_RV64
) {
1833 mask
|= HENVCFG_PBMTE
| HENVCFG_STCE
;
1836 env
->henvcfg
= (env
->henvcfg
& ~mask
) | (val
& mask
);
1838 return RISCV_EXCP_NONE
;
1841 static RISCVException
read_henvcfgh(CPURISCVState
*env
, int csrno
,
1844 *val
= env
->henvcfg
>> 32;
1845 return RISCV_EXCP_NONE
;
1848 static RISCVException
write_henvcfgh(CPURISCVState
*env
, int csrno
,
1851 uint64_t mask
= HENVCFG_PBMTE
| HENVCFG_STCE
;
1852 uint64_t valh
= (uint64_t)val
<< 32;
1854 env
->henvcfg
= (env
->henvcfg
& ~mask
) | (valh
& mask
);
1856 return RISCV_EXCP_NONE
;
1859 static RISCVException
rmw_mip64(CPURISCVState
*env
, int csrno
,
1861 uint64_t new_val
, uint64_t wr_mask
)
1863 RISCVCPU
*cpu
= env_archcpu(env
);
1864 uint64_t old_mip
, mask
= wr_mask
& delegable_ints
;
1867 if (mask
& MIP_SEIP
) {
1868 env
->software_seip
= new_val
& MIP_SEIP
;
1869 new_val
|= env
->external_seip
* MIP_SEIP
;
1872 if (cpu
->cfg
.ext_sstc
&& (env
->priv
== PRV_M
) &&
1873 get_field(env
->menvcfg
, MENVCFG_STCE
)) {
1874 /* sstc extension forbids STIP & VSTIP to be writeable in mip */
1875 mask
= mask
& ~(MIP_STIP
| MIP_VSTIP
);
1879 old_mip
= riscv_cpu_update_mip(cpu
, mask
, (new_val
& mask
));
1884 if (csrno
!= CSR_HVIP
) {
1885 gin
= get_field(env
->hstatus
, HSTATUS_VGEIN
);
1886 old_mip
|= (env
->hgeip
& ((target_ulong
)1 << gin
)) ? MIP_VSEIP
: 0;
1887 old_mip
|= env
->vstime_irq
? MIP_VSTIP
: 0;
1894 return RISCV_EXCP_NONE
;
1897 static RISCVException
rmw_mip(CPURISCVState
*env
, int csrno
,
1898 target_ulong
*ret_val
,
1899 target_ulong new_val
, target_ulong wr_mask
)
1904 ret
= rmw_mip64(env
, csrno
, &rval
, new_val
, wr_mask
);
1912 static RISCVException
rmw_miph(CPURISCVState
*env
, int csrno
,
1913 target_ulong
*ret_val
,
1914 target_ulong new_val
, target_ulong wr_mask
)
1919 ret
= rmw_mip64(env
, csrno
, &rval
,
1920 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
1922 *ret_val
= rval
>> 32;
1928 /* Supervisor Trap Setup */
1929 static RISCVException
read_sstatus_i128(CPURISCVState
*env
, int csrno
,
1932 uint64_t mask
= sstatus_v1_10_mask
;
1933 uint64_t sstatus
= env
->mstatus
& mask
;
1934 if (env
->xl
!= MXL_RV32
|| env
->debugger
) {
1935 mask
|= SSTATUS64_UXL
;
1938 *val
= int128_make128(sstatus
, add_status_sd(MXL_RV128
, sstatus
));
1939 return RISCV_EXCP_NONE
;
1942 static RISCVException
read_sstatus(CPURISCVState
*env
, int csrno
,
1945 target_ulong mask
= (sstatus_v1_10_mask
);
1946 if (env
->xl
!= MXL_RV32
|| env
->debugger
) {
1947 mask
|= SSTATUS64_UXL
;
1949 /* TODO: Use SXL not MXL. */
1950 *val
= add_status_sd(riscv_cpu_mxl(env
), env
->mstatus
& mask
);
1951 return RISCV_EXCP_NONE
;
1954 static RISCVException
write_sstatus(CPURISCVState
*env
, int csrno
,
1957 target_ulong mask
= (sstatus_v1_10_mask
);
1959 if (env
->xl
!= MXL_RV32
|| env
->debugger
) {
1960 if ((val
& SSTATUS64_UXL
) != 0) {
1961 mask
|= SSTATUS64_UXL
;
1964 target_ulong newval
= (env
->mstatus
& ~mask
) | (val
& mask
);
1965 return write_mstatus(env
, CSR_MSTATUS
, newval
);
1968 static RISCVException
rmw_vsie64(CPURISCVState
*env
, int csrno
,
1970 uint64_t new_val
, uint64_t wr_mask
)
1973 uint64_t rval
, vsbits
, mask
= env
->hideleg
& VS_MODE_INTERRUPTS
;
1975 /* Bring VS-level bits to correct position */
1976 vsbits
= new_val
& (VS_MODE_INTERRUPTS
>> 1);
1977 new_val
&= ~(VS_MODE_INTERRUPTS
>> 1);
1978 new_val
|= vsbits
<< 1;
1979 vsbits
= wr_mask
& (VS_MODE_INTERRUPTS
>> 1);
1980 wr_mask
&= ~(VS_MODE_INTERRUPTS
>> 1);
1981 wr_mask
|= vsbits
<< 1;
1983 ret
= rmw_mie64(env
, csrno
, &rval
, new_val
, wr_mask
& mask
);
1986 vsbits
= rval
& VS_MODE_INTERRUPTS
;
1987 rval
&= ~VS_MODE_INTERRUPTS
;
1988 *ret_val
= rval
| (vsbits
>> 1);
1994 static RISCVException
rmw_vsie(CPURISCVState
*env
, int csrno
,
1995 target_ulong
*ret_val
,
1996 target_ulong new_val
, target_ulong wr_mask
)
2001 ret
= rmw_vsie64(env
, csrno
, &rval
, new_val
, wr_mask
);
2009 static RISCVException
rmw_vsieh(CPURISCVState
*env
, int csrno
,
2010 target_ulong
*ret_val
,
2011 target_ulong new_val
, target_ulong wr_mask
)
2016 ret
= rmw_vsie64(env
, csrno
, &rval
,
2017 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2019 *ret_val
= rval
>> 32;
2025 static RISCVException
rmw_sie64(CPURISCVState
*env
, int csrno
,
2027 uint64_t new_val
, uint64_t wr_mask
)
2030 uint64_t mask
= env
->mideleg
& S_MODE_INTERRUPTS
;
2032 if (riscv_cpu_virt_enabled(env
)) {
2033 if (env
->hvictl
& HVICTL_VTI
) {
2034 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
2036 ret
= rmw_vsie64(env
, CSR_VSIE
, ret_val
, new_val
, wr_mask
);
2038 ret
= rmw_mie64(env
, csrno
, ret_val
, new_val
, wr_mask
& mask
);
2048 static RISCVException
rmw_sie(CPURISCVState
*env
, int csrno
,
2049 target_ulong
*ret_val
,
2050 target_ulong new_val
, target_ulong wr_mask
)
2055 ret
= rmw_sie64(env
, csrno
, &rval
, new_val
, wr_mask
);
2056 if (ret
== RISCV_EXCP_NONE
&& ret_val
) {
2063 static RISCVException
rmw_sieh(CPURISCVState
*env
, int csrno
,
2064 target_ulong
*ret_val
,
2065 target_ulong new_val
, target_ulong wr_mask
)
2070 ret
= rmw_sie64(env
, csrno
, &rval
,
2071 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2073 *ret_val
= rval
>> 32;
2079 static RISCVException
read_stvec(CPURISCVState
*env
, int csrno
,
2083 return RISCV_EXCP_NONE
;
2086 static RISCVException
write_stvec(CPURISCVState
*env
, int csrno
,
2089 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
2090 if ((val
& 3) < 2) {
2093 qemu_log_mask(LOG_UNIMP
, "CSR_STVEC: reserved mode not supported\n");
2095 return RISCV_EXCP_NONE
;
2098 static RISCVException
read_scounteren(CPURISCVState
*env
, int csrno
,
2101 *val
= env
->scounteren
;
2102 return RISCV_EXCP_NONE
;
2105 static RISCVException
write_scounteren(CPURISCVState
*env
, int csrno
,
2108 env
->scounteren
= val
;
2109 return RISCV_EXCP_NONE
;
2112 /* Supervisor Trap Handling */
2113 static RISCVException
read_sscratch_i128(CPURISCVState
*env
, int csrno
,
2116 *val
= int128_make128(env
->sscratch
, env
->sscratchh
);
2117 return RISCV_EXCP_NONE
;
2120 static RISCVException
write_sscratch_i128(CPURISCVState
*env
, int csrno
,
2123 env
->sscratch
= int128_getlo(val
);
2124 env
->sscratchh
= int128_gethi(val
);
2125 return RISCV_EXCP_NONE
;
2128 static RISCVException
read_sscratch(CPURISCVState
*env
, int csrno
,
2131 *val
= env
->sscratch
;
2132 return RISCV_EXCP_NONE
;
2135 static RISCVException
write_sscratch(CPURISCVState
*env
, int csrno
,
2138 env
->sscratch
= val
;
2139 return RISCV_EXCP_NONE
;
2142 static RISCVException
read_sepc(CPURISCVState
*env
, int csrno
,
2146 return RISCV_EXCP_NONE
;
2149 static RISCVException
write_sepc(CPURISCVState
*env
, int csrno
,
2153 return RISCV_EXCP_NONE
;
2156 static RISCVException
read_scause(CPURISCVState
*env
, int csrno
,
2160 return RISCV_EXCP_NONE
;
2163 static RISCVException
write_scause(CPURISCVState
*env
, int csrno
,
2167 return RISCV_EXCP_NONE
;
2170 static RISCVException
read_stval(CPURISCVState
*env
, int csrno
,
2174 return RISCV_EXCP_NONE
;
2177 static RISCVException
write_stval(CPURISCVState
*env
, int csrno
,
2181 return RISCV_EXCP_NONE
;
2184 static RISCVException
rmw_vsip64(CPURISCVState
*env
, int csrno
,
2186 uint64_t new_val
, uint64_t wr_mask
)
2189 uint64_t rval
, vsbits
, mask
= env
->hideleg
& vsip_writable_mask
;
2191 /* Bring VS-level bits to correct position */
2192 vsbits
= new_val
& (VS_MODE_INTERRUPTS
>> 1);
2193 new_val
&= ~(VS_MODE_INTERRUPTS
>> 1);
2194 new_val
|= vsbits
<< 1;
2195 vsbits
= wr_mask
& (VS_MODE_INTERRUPTS
>> 1);
2196 wr_mask
&= ~(VS_MODE_INTERRUPTS
>> 1);
2197 wr_mask
|= vsbits
<< 1;
2199 ret
= rmw_mip64(env
, csrno
, &rval
, new_val
, wr_mask
& mask
);
2202 vsbits
= rval
& VS_MODE_INTERRUPTS
;
2203 rval
&= ~VS_MODE_INTERRUPTS
;
2204 *ret_val
= rval
| (vsbits
>> 1);
2210 static RISCVException
rmw_vsip(CPURISCVState
*env
, int csrno
,
2211 target_ulong
*ret_val
,
2212 target_ulong new_val
, target_ulong wr_mask
)
2217 ret
= rmw_vsip64(env
, csrno
, &rval
, new_val
, wr_mask
);
2225 static RISCVException
rmw_vsiph(CPURISCVState
*env
, int csrno
,
2226 target_ulong
*ret_val
,
2227 target_ulong new_val
, target_ulong wr_mask
)
2232 ret
= rmw_vsip64(env
, csrno
, &rval
,
2233 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2235 *ret_val
= rval
>> 32;
2241 static RISCVException
rmw_sip64(CPURISCVState
*env
, int csrno
,
2243 uint64_t new_val
, uint64_t wr_mask
)
2246 uint64_t mask
= env
->mideleg
& sip_writable_mask
;
2248 if (riscv_cpu_virt_enabled(env
)) {
2249 if (env
->hvictl
& HVICTL_VTI
) {
2250 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
2252 ret
= rmw_vsip64(env
, CSR_VSIP
, ret_val
, new_val
, wr_mask
);
2254 ret
= rmw_mip64(env
, csrno
, ret_val
, new_val
, wr_mask
& mask
);
2258 *ret_val
&= env
->mideleg
& S_MODE_INTERRUPTS
;
2264 static RISCVException
rmw_sip(CPURISCVState
*env
, int csrno
,
2265 target_ulong
*ret_val
,
2266 target_ulong new_val
, target_ulong wr_mask
)
2271 ret
= rmw_sip64(env
, csrno
, &rval
, new_val
, wr_mask
);
2279 static RISCVException
rmw_siph(CPURISCVState
*env
, int csrno
,
2280 target_ulong
*ret_val
,
2281 target_ulong new_val
, target_ulong wr_mask
)
2286 ret
= rmw_sip64(env
, csrno
, &rval
,
2287 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2289 *ret_val
= rval
>> 32;
2295 /* Supervisor Protection and Translation */
2296 static RISCVException
read_satp(CPURISCVState
*env
, int csrno
,
2299 if (!riscv_feature(env
, RISCV_FEATURE_MMU
)) {
2301 return RISCV_EXCP_NONE
;
2304 if (env
->priv
== PRV_S
&& get_field(env
->mstatus
, MSTATUS_TVM
)) {
2305 return RISCV_EXCP_ILLEGAL_INST
;
2310 return RISCV_EXCP_NONE
;
2313 static RISCVException
write_satp(CPURISCVState
*env
, int csrno
,
2316 target_ulong vm
, mask
;
2318 if (!riscv_feature(env
, RISCV_FEATURE_MMU
)) {
2319 return RISCV_EXCP_NONE
;
2322 if (riscv_cpu_mxl(env
) == MXL_RV32
) {
2323 vm
= validate_vm(env
, get_field(val
, SATP32_MODE
));
2324 mask
= (val
^ env
->satp
) & (SATP32_MODE
| SATP32_ASID
| SATP32_PPN
);
2326 vm
= validate_vm(env
, get_field(val
, SATP64_MODE
));
2327 mask
= (val
^ env
->satp
) & (SATP64_MODE
| SATP64_ASID
| SATP64_PPN
);
2331 if (env
->priv
== PRV_S
&& get_field(env
->mstatus
, MSTATUS_TVM
)) {
2332 return RISCV_EXCP_ILLEGAL_INST
;
2335 * The ISA defines SATP.MODE=Bare as "no translation", but we still
2336 * pass these through QEMU's TLB emulation as it improves
2337 * performance. Flushing the TLB on SATP writes with paging
2338 * enabled avoids leaking those invalid cached mappings.
2340 tlb_flush(env_cpu(env
));
2344 return RISCV_EXCP_NONE
;
2347 static int read_vstopi(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2351 uint64_t vseip
, vsgein
;
2352 uint32_t iid
, iprio
, hviid
, hviprio
, gein
;
2353 uint32_t s
, scount
= 0, siid
[VSTOPI_NUM_SRCS
], siprio
[VSTOPI_NUM_SRCS
];
2355 gein
= get_field(env
->hstatus
, HSTATUS_VGEIN
);
2356 hviid
= get_field(env
->hvictl
, HVICTL_IID
);
2357 hviprio
= get_field(env
->hvictl
, HVICTL_IPRIO
);
2360 vsgein
= (env
->hgeip
& (1ULL << gein
)) ? MIP_VSEIP
: 0;
2361 vseip
= env
->mie
& (env
->mip
| vsgein
) & MIP_VSEIP
;
2362 if (gein
<= env
->geilen
&& vseip
) {
2363 siid
[scount
] = IRQ_S_EXT
;
2364 siprio
[scount
] = IPRIO_MMAXIPRIO
+ 1;
2365 if (env
->aia_ireg_rmw_fn
[PRV_S
]) {
2367 * Call machine specific IMSIC register emulation for
2370 ret
= env
->aia_ireg_rmw_fn
[PRV_S
](
2371 env
->aia_ireg_rmw_fn_arg
[PRV_S
],
2372 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI
, PRV_S
, true, gein
,
2373 riscv_cpu_mxl_bits(env
)),
2375 if (!ret
&& topei
) {
2376 siprio
[scount
] = topei
& IMSIC_TOPEI_IPRIO_MASK
;
2382 if (hviid
== IRQ_S_EXT
&& hviprio
) {
2383 siid
[scount
] = IRQ_S_EXT
;
2384 siprio
[scount
] = hviprio
;
2389 if (env
->hvictl
& HVICTL_VTI
) {
2390 if (hviid
!= IRQ_S_EXT
) {
2391 siid
[scount
] = hviid
;
2392 siprio
[scount
] = hviprio
;
2396 irq
= riscv_cpu_vsirq_pending(env
);
2397 if (irq
!= IRQ_S_EXT
&& 0 < irq
&& irq
<= 63) {
2399 siprio
[scount
] = env
->hviprio
[irq
];
2406 for (s
= 0; s
< scount
; s
++) {
2407 if (siprio
[s
] < iprio
) {
2414 if (env
->hvictl
& HVICTL_IPRIOM
) {
2415 if (iprio
> IPRIO_MMAXIPRIO
) {
2416 iprio
= IPRIO_MMAXIPRIO
;
2419 if (riscv_cpu_default_priority(iid
) > IPRIO_DEFAULT_S
) {
2420 iprio
= IPRIO_MMAXIPRIO
;
2430 *val
= (iid
& TOPI_IID_MASK
) << TOPI_IID_SHIFT
;
2432 return RISCV_EXCP_NONE
;
2435 static int read_stopi(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2440 if (riscv_cpu_virt_enabled(env
)) {
2441 return read_vstopi(env
, CSR_VSTOPI
, val
);
2444 irq
= riscv_cpu_sirq_pending(env
);
2445 if (irq
<= 0 || irq
> 63) {
2448 iprio
= env
->siprio
[irq
];
2450 if (riscv_cpu_default_priority(irq
) > IPRIO_DEFAULT_S
) {
2451 iprio
= IPRIO_MMAXIPRIO
;
2454 *val
= (irq
& TOPI_IID_MASK
) << TOPI_IID_SHIFT
;
2458 return RISCV_EXCP_NONE
;
2461 /* Hypervisor Extensions */
2462 static RISCVException
read_hstatus(CPURISCVState
*env
, int csrno
,
2465 *val
= env
->hstatus
;
2466 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
2467 /* We only support 64-bit VSXL */
2468 *val
= set_field(*val
, HSTATUS_VSXL
, 2);
2470 /* We only support little endian */
2471 *val
= set_field(*val
, HSTATUS_VSBE
, 0);
2472 return RISCV_EXCP_NONE
;
2475 static RISCVException
write_hstatus(CPURISCVState
*env
, int csrno
,
2479 if (riscv_cpu_mxl(env
) != MXL_RV32
&& get_field(val
, HSTATUS_VSXL
) != 2) {
2480 qemu_log_mask(LOG_UNIMP
, "QEMU does not support mixed HSXLEN options.");
2482 if (get_field(val
, HSTATUS_VSBE
) != 0) {
2483 qemu_log_mask(LOG_UNIMP
, "QEMU does not support big endian guests.");
2485 return RISCV_EXCP_NONE
;
2488 static RISCVException
read_hedeleg(CPURISCVState
*env
, int csrno
,
2491 *val
= env
->hedeleg
;
2492 return RISCV_EXCP_NONE
;
2495 static RISCVException
write_hedeleg(CPURISCVState
*env
, int csrno
,
2498 env
->hedeleg
= val
& vs_delegable_excps
;
2499 return RISCV_EXCP_NONE
;
2502 static RISCVException
rmw_hideleg64(CPURISCVState
*env
, int csrno
,
2504 uint64_t new_val
, uint64_t wr_mask
)
2506 uint64_t mask
= wr_mask
& vs_delegable_ints
;
2509 *ret_val
= env
->hideleg
& vs_delegable_ints
;
2512 env
->hideleg
= (env
->hideleg
& ~mask
) | (new_val
& mask
);
2513 return RISCV_EXCP_NONE
;
2516 static RISCVException
rmw_hideleg(CPURISCVState
*env
, int csrno
,
2517 target_ulong
*ret_val
,
2518 target_ulong new_val
, target_ulong wr_mask
)
2523 ret
= rmw_hideleg64(env
, csrno
, &rval
, new_val
, wr_mask
);
2531 static RISCVException
rmw_hidelegh(CPURISCVState
*env
, int csrno
,
2532 target_ulong
*ret_val
,
2533 target_ulong new_val
, target_ulong wr_mask
)
2538 ret
= rmw_hideleg64(env
, csrno
, &rval
,
2539 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2541 *ret_val
= rval
>> 32;
2547 static RISCVException
rmw_hvip64(CPURISCVState
*env
, int csrno
,
2549 uint64_t new_val
, uint64_t wr_mask
)
2553 ret
= rmw_mip64(env
, csrno
, ret_val
, new_val
,
2554 wr_mask
& hvip_writable_mask
);
2556 *ret_val
&= VS_MODE_INTERRUPTS
;
2562 static RISCVException
rmw_hvip(CPURISCVState
*env
, int csrno
,
2563 target_ulong
*ret_val
,
2564 target_ulong new_val
, target_ulong wr_mask
)
2569 ret
= rmw_hvip64(env
, csrno
, &rval
, new_val
, wr_mask
);
2577 static RISCVException
rmw_hviph(CPURISCVState
*env
, int csrno
,
2578 target_ulong
*ret_val
,
2579 target_ulong new_val
, target_ulong wr_mask
)
2584 ret
= rmw_hvip64(env
, csrno
, &rval
,
2585 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2587 *ret_val
= rval
>> 32;
2593 static RISCVException
rmw_hip(CPURISCVState
*env
, int csrno
,
2594 target_ulong
*ret_value
,
2595 target_ulong new_value
, target_ulong write_mask
)
2597 int ret
= rmw_mip(env
, csrno
, ret_value
, new_value
,
2598 write_mask
& hip_writable_mask
);
2601 *ret_value
&= HS_MODE_INTERRUPTS
;
2606 static RISCVException
rmw_hie(CPURISCVState
*env
, int csrno
,
2607 target_ulong
*ret_val
,
2608 target_ulong new_val
, target_ulong wr_mask
)
2613 ret
= rmw_mie64(env
, csrno
, &rval
, new_val
, wr_mask
& HS_MODE_INTERRUPTS
);
2615 *ret_val
= rval
& HS_MODE_INTERRUPTS
;
2621 static RISCVException
read_hcounteren(CPURISCVState
*env
, int csrno
,
2624 *val
= env
->hcounteren
;
2625 return RISCV_EXCP_NONE
;
2628 static RISCVException
write_hcounteren(CPURISCVState
*env
, int csrno
,
2631 env
->hcounteren
= val
;
2632 return RISCV_EXCP_NONE
;
2635 static RISCVException
read_hgeie(CPURISCVState
*env
, int csrno
,
2641 return RISCV_EXCP_NONE
;
2644 static RISCVException
write_hgeie(CPURISCVState
*env
, int csrno
,
2647 /* Only GEILEN:1 bits implemented and BIT0 is never implemented */
2648 val
&= ((((target_ulong
)1) << env
->geilen
) - 1) << 1;
2650 /* Update mip.SGEIP bit */
2651 riscv_cpu_update_mip(env_archcpu(env
), MIP_SGEIP
,
2652 BOOL_TO_MASK(!!(env
->hgeie
& env
->hgeip
)));
2653 return RISCV_EXCP_NONE
;
2656 static RISCVException
read_htval(CPURISCVState
*env
, int csrno
,
2660 return RISCV_EXCP_NONE
;
2663 static RISCVException
write_htval(CPURISCVState
*env
, int csrno
,
2667 return RISCV_EXCP_NONE
;
2670 static RISCVException
read_htinst(CPURISCVState
*env
, int csrno
,
2674 return RISCV_EXCP_NONE
;
2677 static RISCVException
write_htinst(CPURISCVState
*env
, int csrno
,
2680 return RISCV_EXCP_NONE
;
2683 static RISCVException
read_hgeip(CPURISCVState
*env
, int csrno
,
2689 return RISCV_EXCP_NONE
;
2692 static RISCVException
read_hgatp(CPURISCVState
*env
, int csrno
,
2696 return RISCV_EXCP_NONE
;
2699 static RISCVException
write_hgatp(CPURISCVState
*env
, int csrno
,
2703 return RISCV_EXCP_NONE
;
2706 static RISCVException
read_htimedelta(CPURISCVState
*env
, int csrno
,
2709 if (!env
->rdtime_fn
) {
2710 return RISCV_EXCP_ILLEGAL_INST
;
2713 *val
= env
->htimedelta
;
2714 return RISCV_EXCP_NONE
;
2717 static RISCVException
write_htimedelta(CPURISCVState
*env
, int csrno
,
2720 if (!env
->rdtime_fn
) {
2721 return RISCV_EXCP_ILLEGAL_INST
;
2724 if (riscv_cpu_mxl(env
) == MXL_RV32
) {
2725 env
->htimedelta
= deposit64(env
->htimedelta
, 0, 32, (uint64_t)val
);
2727 env
->htimedelta
= val
;
2729 return RISCV_EXCP_NONE
;
2732 static RISCVException
read_htimedeltah(CPURISCVState
*env
, int csrno
,
2735 if (!env
->rdtime_fn
) {
2736 return RISCV_EXCP_ILLEGAL_INST
;
2739 *val
= env
->htimedelta
>> 32;
2740 return RISCV_EXCP_NONE
;
2743 static RISCVException
write_htimedeltah(CPURISCVState
*env
, int csrno
,
2746 if (!env
->rdtime_fn
) {
2747 return RISCV_EXCP_ILLEGAL_INST
;
2750 env
->htimedelta
= deposit64(env
->htimedelta
, 32, 32, (uint64_t)val
);
2751 return RISCV_EXCP_NONE
;
2754 static int read_hvictl(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2757 return RISCV_EXCP_NONE
;
2760 static int write_hvictl(CPURISCVState
*env
, int csrno
, target_ulong val
)
2762 env
->hvictl
= val
& HVICTL_VALID_MASK
;
2763 return RISCV_EXCP_NONE
;
2766 static int read_hvipriox(CPURISCVState
*env
, int first_index
,
2767 uint8_t *iprio
, target_ulong
*val
)
2769 int i
, irq
, rdzero
, num_irqs
= 4 * (riscv_cpu_mxl_bits(env
) / 32);
2771 /* First index has to be a multiple of number of irqs per register */
2772 if (first_index
% num_irqs
) {
2773 return (riscv_cpu_virt_enabled(env
)) ?
2774 RISCV_EXCP_VIRT_INSTRUCTION_FAULT
: RISCV_EXCP_ILLEGAL_INST
;
2777 /* Fill-up return value */
2779 for (i
= 0; i
< num_irqs
; i
++) {
2780 if (riscv_cpu_hviprio_index2irq(first_index
+ i
, &irq
, &rdzero
)) {
2786 *val
|= ((target_ulong
)iprio
[irq
]) << (i
* 8);
2789 return RISCV_EXCP_NONE
;
2792 static int write_hvipriox(CPURISCVState
*env
, int first_index
,
2793 uint8_t *iprio
, target_ulong val
)
2795 int i
, irq
, rdzero
, num_irqs
= 4 * (riscv_cpu_mxl_bits(env
) / 32);
2797 /* First index has to be a multiple of number of irqs per register */
2798 if (first_index
% num_irqs
) {
2799 return (riscv_cpu_virt_enabled(env
)) ?
2800 RISCV_EXCP_VIRT_INSTRUCTION_FAULT
: RISCV_EXCP_ILLEGAL_INST
;
2803 /* Fill-up priority arrary */
2804 for (i
= 0; i
< num_irqs
; i
++) {
2805 if (riscv_cpu_hviprio_index2irq(first_index
+ i
, &irq
, &rdzero
)) {
2811 iprio
[irq
] = (val
>> (i
* 8)) & 0xff;
2815 return RISCV_EXCP_NONE
;
2818 static int read_hviprio1(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2820 return read_hvipriox(env
, 0, env
->hviprio
, val
);
2823 static int write_hviprio1(CPURISCVState
*env
, int csrno
, target_ulong val
)
2825 return write_hvipriox(env
, 0, env
->hviprio
, val
);
2828 static int read_hviprio1h(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2830 return read_hvipriox(env
, 4, env
->hviprio
, val
);
2833 static int write_hviprio1h(CPURISCVState
*env
, int csrno
, target_ulong val
)
2835 return write_hvipriox(env
, 4, env
->hviprio
, val
);
2838 static int read_hviprio2(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2840 return read_hvipriox(env
, 8, env
->hviprio
, val
);
2843 static int write_hviprio2(CPURISCVState
*env
, int csrno
, target_ulong val
)
2845 return write_hvipriox(env
, 8, env
->hviprio
, val
);
2848 static int read_hviprio2h(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2850 return read_hvipriox(env
, 12, env
->hviprio
, val
);
2853 static int write_hviprio2h(CPURISCVState
*env
, int csrno
, target_ulong val
)
2855 return write_hvipriox(env
, 12, env
->hviprio
, val
);
2858 /* Virtual CSR Registers */
2859 static RISCVException
read_vsstatus(CPURISCVState
*env
, int csrno
,
2862 *val
= env
->vsstatus
;
2863 return RISCV_EXCP_NONE
;
2866 static RISCVException
write_vsstatus(CPURISCVState
*env
, int csrno
,
2869 uint64_t mask
= (target_ulong
)-1;
2870 if ((val
& VSSTATUS64_UXL
) == 0) {
2871 mask
&= ~VSSTATUS64_UXL
;
2873 env
->vsstatus
= (env
->vsstatus
& ~mask
) | (uint64_t)val
;
2874 return RISCV_EXCP_NONE
;
2877 static int read_vstvec(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2880 return RISCV_EXCP_NONE
;
2883 static RISCVException
write_vstvec(CPURISCVState
*env
, int csrno
,
2887 return RISCV_EXCP_NONE
;
2890 static RISCVException
read_vsscratch(CPURISCVState
*env
, int csrno
,
2893 *val
= env
->vsscratch
;
2894 return RISCV_EXCP_NONE
;
2897 static RISCVException
write_vsscratch(CPURISCVState
*env
, int csrno
,
2900 env
->vsscratch
= val
;
2901 return RISCV_EXCP_NONE
;
2904 static RISCVException
read_vsepc(CPURISCVState
*env
, int csrno
,
2908 return RISCV_EXCP_NONE
;
2911 static RISCVException
write_vsepc(CPURISCVState
*env
, int csrno
,
2915 return RISCV_EXCP_NONE
;
2918 static RISCVException
read_vscause(CPURISCVState
*env
, int csrno
,
2921 *val
= env
->vscause
;
2922 return RISCV_EXCP_NONE
;
2925 static RISCVException
write_vscause(CPURISCVState
*env
, int csrno
,
2929 return RISCV_EXCP_NONE
;
2932 static RISCVException
read_vstval(CPURISCVState
*env
, int csrno
,
2936 return RISCV_EXCP_NONE
;
2939 static RISCVException
write_vstval(CPURISCVState
*env
, int csrno
,
2943 return RISCV_EXCP_NONE
;
2946 static RISCVException
read_vsatp(CPURISCVState
*env
, int csrno
,
2950 return RISCV_EXCP_NONE
;
2953 static RISCVException
write_vsatp(CPURISCVState
*env
, int csrno
,
2957 return RISCV_EXCP_NONE
;
2960 static RISCVException
read_mtval2(CPURISCVState
*env
, int csrno
,
2964 return RISCV_EXCP_NONE
;
2967 static RISCVException
write_mtval2(CPURISCVState
*env
, int csrno
,
2971 return RISCV_EXCP_NONE
;
2974 static RISCVException
read_mtinst(CPURISCVState
*env
, int csrno
,
2978 return RISCV_EXCP_NONE
;
2981 static RISCVException
write_mtinst(CPURISCVState
*env
, int csrno
,
2985 return RISCV_EXCP_NONE
;
2988 /* Physical Memory Protection */
2989 static RISCVException
read_mseccfg(CPURISCVState
*env
, int csrno
,
2992 *val
= mseccfg_csr_read(env
);
2993 return RISCV_EXCP_NONE
;
2996 static RISCVException
write_mseccfg(CPURISCVState
*env
, int csrno
,
2999 mseccfg_csr_write(env
, val
);
3000 return RISCV_EXCP_NONE
;
3003 static bool check_pmp_reg_index(CPURISCVState
*env
, uint32_t reg_index
)
3005 /* TODO: RV128 restriction check */
3006 if ((reg_index
& 1) && (riscv_cpu_mxl(env
) == MXL_RV64
)) {
3012 static RISCVException
read_pmpcfg(CPURISCVState
*env
, int csrno
,
3015 uint32_t reg_index
= csrno
- CSR_PMPCFG0
;
3017 if (!check_pmp_reg_index(env
, reg_index
)) {
3018 return RISCV_EXCP_ILLEGAL_INST
;
3020 *val
= pmpcfg_csr_read(env
, csrno
- CSR_PMPCFG0
);
3021 return RISCV_EXCP_NONE
;
3024 static RISCVException
write_pmpcfg(CPURISCVState
*env
, int csrno
,
3027 uint32_t reg_index
= csrno
- CSR_PMPCFG0
;
3029 if (!check_pmp_reg_index(env
, reg_index
)) {
3030 return RISCV_EXCP_ILLEGAL_INST
;
3032 pmpcfg_csr_write(env
, csrno
- CSR_PMPCFG0
, val
);
3033 return RISCV_EXCP_NONE
;
3036 static RISCVException
read_pmpaddr(CPURISCVState
*env
, int csrno
,
3039 *val
= pmpaddr_csr_read(env
, csrno
- CSR_PMPADDR0
);
3040 return RISCV_EXCP_NONE
;
3043 static RISCVException
write_pmpaddr(CPURISCVState
*env
, int csrno
,
3046 pmpaddr_csr_write(env
, csrno
- CSR_PMPADDR0
, val
);
3047 return RISCV_EXCP_NONE
;
3050 static RISCVException
read_tselect(CPURISCVState
*env
, int csrno
,
3053 *val
= tselect_csr_read(env
);
3054 return RISCV_EXCP_NONE
;
3057 static RISCVException
write_tselect(CPURISCVState
*env
, int csrno
,
3060 tselect_csr_write(env
, val
);
3061 return RISCV_EXCP_NONE
;
3064 static RISCVException
read_tdata(CPURISCVState
*env
, int csrno
,
3067 /* return 0 in tdata1 to end the trigger enumeration */
3068 if (env
->trigger_cur
>= TRIGGER_NUM
&& csrno
== CSR_TDATA1
) {
3070 return RISCV_EXCP_NONE
;
3073 if (!tdata_available(env
, csrno
- CSR_TDATA1
)) {
3074 return RISCV_EXCP_ILLEGAL_INST
;
3077 *val
= tdata_csr_read(env
, csrno
- CSR_TDATA1
);
3078 return RISCV_EXCP_NONE
;
3081 static RISCVException
write_tdata(CPURISCVState
*env
, int csrno
,
3084 if (!tdata_available(env
, csrno
- CSR_TDATA1
)) {
3085 return RISCV_EXCP_ILLEGAL_INST
;
3088 tdata_csr_write(env
, csrno
- CSR_TDATA1
, val
);
3089 return RISCV_EXCP_NONE
;
3093 * Functions to access Pointer Masking feature registers
3094 * We have to check if current priv lvl could modify
3097 static bool check_pm_current_disabled(CPURISCVState
*env
, int csrno
)
3099 int csr_priv
= get_field(csrno
, 0x300);
3102 if (env
->debugger
) {
3106 * If priv lvls differ that means we're accessing csr from higher priv lvl,
3107 * so allow the access
3109 if (env
->priv
!= csr_priv
) {
3112 switch (env
->priv
) {
3114 pm_current
= get_field(env
->mmte
, M_PM_CURRENT
);
3117 pm_current
= get_field(env
->mmte
, S_PM_CURRENT
);
3120 pm_current
= get_field(env
->mmte
, U_PM_CURRENT
);
3123 g_assert_not_reached();
3125 /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */
3129 static RISCVException
read_mmte(CPURISCVState
*env
, int csrno
,
3132 *val
= env
->mmte
& MMTE_MASK
;
3133 return RISCV_EXCP_NONE
;
3136 static RISCVException
write_mmte(CPURISCVState
*env
, int csrno
,
3140 target_ulong wpri_val
= val
& MMTE_MASK
;
3142 if (val
!= wpri_val
) {
3143 qemu_log_mask(LOG_GUEST_ERROR
, "%s" TARGET_FMT_lx
" %s" TARGET_FMT_lx
"\n",
3144 "MMTE: WPRI violation written 0x", val
,
3145 "vs expected 0x", wpri_val
);
3147 /* for machine mode pm.current is hardwired to 1 */
3148 wpri_val
|= MMTE_M_PM_CURRENT
;
3150 /* hardwiring pm.instruction bit to 0, since it's not supported yet */
3151 wpri_val
&= ~(MMTE_M_PM_INSN
| MMTE_S_PM_INSN
| MMTE_U_PM_INSN
);
3152 env
->mmte
= wpri_val
| PM_EXT_DIRTY
;
3153 riscv_cpu_update_mask(env
);
3155 /* Set XS and SD bits, since PM CSRs are dirty */
3156 mstatus
= env
->mstatus
| MSTATUS_XS
;
3157 write_mstatus(env
, csrno
, mstatus
);
3158 return RISCV_EXCP_NONE
;
3161 static RISCVException
read_smte(CPURISCVState
*env
, int csrno
,
3164 *val
= env
->mmte
& SMTE_MASK
;
3165 return RISCV_EXCP_NONE
;
3168 static RISCVException
write_smte(CPURISCVState
*env
, int csrno
,
3171 target_ulong wpri_val
= val
& SMTE_MASK
;
3173 if (val
!= wpri_val
) {
3174 qemu_log_mask(LOG_GUEST_ERROR
, "%s" TARGET_FMT_lx
" %s" TARGET_FMT_lx
"\n",
3175 "SMTE: WPRI violation written 0x", val
,
3176 "vs expected 0x", wpri_val
);
3179 /* if pm.current==0 we can't modify current PM CSRs */
3180 if (check_pm_current_disabled(env
, csrno
)) {
3181 return RISCV_EXCP_NONE
;
3184 wpri_val
|= (env
->mmte
& ~SMTE_MASK
);
3185 write_mmte(env
, csrno
, wpri_val
);
3186 return RISCV_EXCP_NONE
;
3189 static RISCVException
read_umte(CPURISCVState
*env
, int csrno
,
3192 *val
= env
->mmte
& UMTE_MASK
;
3193 return RISCV_EXCP_NONE
;
3196 static RISCVException
write_umte(CPURISCVState
*env
, int csrno
,
3199 target_ulong wpri_val
= val
& UMTE_MASK
;
3201 if (val
!= wpri_val
) {
3202 qemu_log_mask(LOG_GUEST_ERROR
, "%s" TARGET_FMT_lx
" %s" TARGET_FMT_lx
"\n",
3203 "UMTE: WPRI violation written 0x", val
,
3204 "vs expected 0x", wpri_val
);
3207 if (check_pm_current_disabled(env
, csrno
)) {
3208 return RISCV_EXCP_NONE
;
3211 wpri_val
|= (env
->mmte
& ~UMTE_MASK
);
3212 write_mmte(env
, csrno
, wpri_val
);
3213 return RISCV_EXCP_NONE
;
3216 static RISCVException
read_mpmmask(CPURISCVState
*env
, int csrno
,
3219 *val
= env
->mpmmask
;
3220 return RISCV_EXCP_NONE
;
3223 static RISCVException
write_mpmmask(CPURISCVState
*env
, int csrno
,
3229 if ((env
->priv
== PRV_M
) && (env
->mmte
& M_PM_ENABLE
)) {
3230 env
->cur_pmmask
= val
;
3232 env
->mmte
|= PM_EXT_DIRTY
;
3234 /* Set XS and SD bits, since PM CSRs are dirty */
3235 mstatus
= env
->mstatus
| MSTATUS_XS
;
3236 write_mstatus(env
, csrno
, mstatus
);
3237 return RISCV_EXCP_NONE
;
3240 static RISCVException
read_spmmask(CPURISCVState
*env
, int csrno
,
3243 *val
= env
->spmmask
;
3244 return RISCV_EXCP_NONE
;
3247 static RISCVException
write_spmmask(CPURISCVState
*env
, int csrno
,
3252 /* if pm.current==0 we can't modify current PM CSRs */
3253 if (check_pm_current_disabled(env
, csrno
)) {
3254 return RISCV_EXCP_NONE
;
3257 if ((env
->priv
== PRV_S
) && (env
->mmte
& S_PM_ENABLE
)) {
3258 env
->cur_pmmask
= val
;
3260 env
->mmte
|= PM_EXT_DIRTY
;
3262 /* Set XS and SD bits, since PM CSRs are dirty */
3263 mstatus
= env
->mstatus
| MSTATUS_XS
;
3264 write_mstatus(env
, csrno
, mstatus
);
3265 return RISCV_EXCP_NONE
;
3268 static RISCVException
read_upmmask(CPURISCVState
*env
, int csrno
,
3271 *val
= env
->upmmask
;
3272 return RISCV_EXCP_NONE
;
3275 static RISCVException
write_upmmask(CPURISCVState
*env
, int csrno
,
3280 /* if pm.current==0 we can't modify current PM CSRs */
3281 if (check_pm_current_disabled(env
, csrno
)) {
3282 return RISCV_EXCP_NONE
;
3285 if ((env
->priv
== PRV_U
) && (env
->mmte
& U_PM_ENABLE
)) {
3286 env
->cur_pmmask
= val
;
3288 env
->mmte
|= PM_EXT_DIRTY
;
3290 /* Set XS and SD bits, since PM CSRs are dirty */
3291 mstatus
= env
->mstatus
| MSTATUS_XS
;
3292 write_mstatus(env
, csrno
, mstatus
);
3293 return RISCV_EXCP_NONE
;
3296 static RISCVException
read_mpmbase(CPURISCVState
*env
, int csrno
,
3299 *val
= env
->mpmbase
;
3300 return RISCV_EXCP_NONE
;
3303 static RISCVException
write_mpmbase(CPURISCVState
*env
, int csrno
,
3309 if ((env
->priv
== PRV_M
) && (env
->mmte
& M_PM_ENABLE
)) {
3310 env
->cur_pmbase
= val
;
3312 env
->mmte
|= PM_EXT_DIRTY
;
3314 /* Set XS and SD bits, since PM CSRs are dirty */
3315 mstatus
= env
->mstatus
| MSTATUS_XS
;
3316 write_mstatus(env
, csrno
, mstatus
);
3317 return RISCV_EXCP_NONE
;
3320 static RISCVException
read_spmbase(CPURISCVState
*env
, int csrno
,
3323 *val
= env
->spmbase
;
3324 return RISCV_EXCP_NONE
;
3327 static RISCVException
write_spmbase(CPURISCVState
*env
, int csrno
,
3332 /* if pm.current==0 we can't modify current PM CSRs */
3333 if (check_pm_current_disabled(env
, csrno
)) {
3334 return RISCV_EXCP_NONE
;
3337 if ((env
->priv
== PRV_S
) && (env
->mmte
& S_PM_ENABLE
)) {
3338 env
->cur_pmbase
= val
;
3340 env
->mmte
|= PM_EXT_DIRTY
;
3342 /* Set XS and SD bits, since PM CSRs are dirty */
3343 mstatus
= env
->mstatus
| MSTATUS_XS
;
3344 write_mstatus(env
, csrno
, mstatus
);
3345 return RISCV_EXCP_NONE
;
3348 static RISCVException
read_upmbase(CPURISCVState
*env
, int csrno
,
3351 *val
= env
->upmbase
;
3352 return RISCV_EXCP_NONE
;
3355 static RISCVException
write_upmbase(CPURISCVState
*env
, int csrno
,
3360 /* if pm.current==0 we can't modify current PM CSRs */
3361 if (check_pm_current_disabled(env
, csrno
)) {
3362 return RISCV_EXCP_NONE
;
3365 if ((env
->priv
== PRV_U
) && (env
->mmte
& U_PM_ENABLE
)) {
3366 env
->cur_pmbase
= val
;
3368 env
->mmte
|= PM_EXT_DIRTY
;
3370 /* Set XS and SD bits, since PM CSRs are dirty */
3371 mstatus
= env
->mstatus
| MSTATUS_XS
;
3372 write_mstatus(env
, csrno
, mstatus
);
3373 return RISCV_EXCP_NONE
;
3378 /* Crypto Extension */
3379 static RISCVException
rmw_seed(CPURISCVState
*env
, int csrno
,
3380 target_ulong
*ret_value
,
3381 target_ulong new_value
,
3382 target_ulong write_mask
)
3385 Error
*random_e
= NULL
;
3389 random_r
= qemu_guest_getrandom(&random_v
, 2, &random_e
);
3390 if (unlikely(random_r
< 0)) {
3392 * Failed, for unknown reasons in the crypto subsystem.
3393 * The best we can do is log the reason and return a
3394 * failure indication to the guest. There is no reason
3395 * we know to expect the failure to be transitory, so
3396 * indicate DEAD to avoid having the guest spin on WAIT.
3398 qemu_log_mask(LOG_UNIMP
, "%s: Crypto failure: %s",
3399 __func__
, error_get_pretty(random_e
));
3400 error_free(random_e
);
3401 rval
= SEED_OPST_DEAD
;
3403 rval
= random_v
| SEED_OPST_ES16
;
3410 return RISCV_EXCP_NONE
;
3414 * riscv_csrrw - read and/or update control and status register
3416 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
3417 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
3418 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
3419 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
3422 static inline RISCVException
riscv_csrrw_check(CPURISCVState
*env
,
3427 /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
3428 int read_only
= get_field(csrno
, 0xC00) == 3;
3429 int csr_min_priv
= csr_ops
[csrno
].min_priv_ver
;
3431 /* ensure the CSR extension is enabled. */
3432 if (!cpu
->cfg
.ext_icsr
) {
3433 return RISCV_EXCP_ILLEGAL_INST
;
3436 if (env
->priv_ver
< csr_min_priv
) {
3437 return RISCV_EXCP_ILLEGAL_INST
;
3440 /* check predicate */
3441 if (!csr_ops
[csrno
].predicate
) {
3442 return RISCV_EXCP_ILLEGAL_INST
;
3445 if (write_mask
&& read_only
) {
3446 return RISCV_EXCP_ILLEGAL_INST
;
3449 RISCVException ret
= csr_ops
[csrno
].predicate(env
, csrno
);
3450 if (ret
!= RISCV_EXCP_NONE
) {
3454 #if !defined(CONFIG_USER_ONLY)
3455 int csr_priv
, effective_priv
= env
->priv
;
3457 if (riscv_has_ext(env
, RVH
) && env
->priv
== PRV_S
&&
3458 !riscv_cpu_virt_enabled(env
)) {
3460 * We are in HS mode. Add 1 to the effective privledge level to
3461 * allow us to access the Hypervisor CSRs.
3466 csr_priv
= get_field(csrno
, 0x300);
3467 if (!env
->debugger
&& (effective_priv
< csr_priv
)) {
3468 if (csr_priv
== (PRV_S
+ 1) && riscv_cpu_virt_enabled(env
)) {
3469 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
3471 return RISCV_EXCP_ILLEGAL_INST
;
3474 return RISCV_EXCP_NONE
;
3477 static RISCVException
riscv_csrrw_do64(CPURISCVState
*env
, int csrno
,
3478 target_ulong
*ret_value
,
3479 target_ulong new_value
,
3480 target_ulong write_mask
)
3483 target_ulong old_value
;
3485 /* execute combined read/write operation if it exists */
3486 if (csr_ops
[csrno
].op
) {
3487 return csr_ops
[csrno
].op(env
, csrno
, ret_value
, new_value
, write_mask
);
3490 /* if no accessor exists then return failure */
3491 if (!csr_ops
[csrno
].read
) {
3492 return RISCV_EXCP_ILLEGAL_INST
;
3494 /* read old value */
3495 ret
= csr_ops
[csrno
].read(env
, csrno
, &old_value
);
3496 if (ret
!= RISCV_EXCP_NONE
) {
3500 /* write value if writable and write mask set, otherwise drop writes */
3502 new_value
= (old_value
& ~write_mask
) | (new_value
& write_mask
);
3503 if (csr_ops
[csrno
].write
) {
3504 ret
= csr_ops
[csrno
].write(env
, csrno
, new_value
);
3505 if (ret
!= RISCV_EXCP_NONE
) {
3511 /* return old value */
3513 *ret_value
= old_value
;
3516 return RISCV_EXCP_NONE
;
3519 RISCVException
riscv_csrrw(CPURISCVState
*env
, int csrno
,
3520 target_ulong
*ret_value
,
3521 target_ulong new_value
, target_ulong write_mask
)
3523 RISCVCPU
*cpu
= env_archcpu(env
);
3525 RISCVException ret
= riscv_csrrw_check(env
, csrno
, write_mask
, cpu
);
3526 if (ret
!= RISCV_EXCP_NONE
) {
3530 return riscv_csrrw_do64(env
, csrno
, ret_value
, new_value
, write_mask
);
3533 static RISCVException
riscv_csrrw_do128(CPURISCVState
*env
, int csrno
,
3541 /* read old value */
3542 ret
= csr_ops
[csrno
].read128(env
, csrno
, &old_value
);
3543 if (ret
!= RISCV_EXCP_NONE
) {
3547 /* write value if writable and write mask set, otherwise drop writes */
3548 if (int128_nz(write_mask
)) {
3549 new_value
= int128_or(int128_and(old_value
, int128_not(write_mask
)),
3550 int128_and(new_value
, write_mask
));
3551 if (csr_ops
[csrno
].write128
) {
3552 ret
= csr_ops
[csrno
].write128(env
, csrno
, new_value
);
3553 if (ret
!= RISCV_EXCP_NONE
) {
3556 } else if (csr_ops
[csrno
].write
) {
3557 /* avoids having to write wrappers for all registers */
3558 ret
= csr_ops
[csrno
].write(env
, csrno
, int128_getlo(new_value
));
3559 if (ret
!= RISCV_EXCP_NONE
) {
3565 /* return old value */
3567 *ret_value
= old_value
;
3570 return RISCV_EXCP_NONE
;
3573 RISCVException
riscv_csrrw_i128(CPURISCVState
*env
, int csrno
,
3575 Int128 new_value
, Int128 write_mask
)
3578 RISCVCPU
*cpu
= env_archcpu(env
);
3580 ret
= riscv_csrrw_check(env
, csrno
, int128_nz(write_mask
), cpu
);
3581 if (ret
!= RISCV_EXCP_NONE
) {
3585 if (csr_ops
[csrno
].read128
) {
3586 return riscv_csrrw_do128(env
, csrno
, ret_value
, new_value
, write_mask
);
3590 * Fall back to 64-bit version for now, if the 128-bit alternative isn't
3592 * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
3593 * significant), for those, this fallback is correctly handling the accesses
3595 target_ulong old_value
;
3596 ret
= riscv_csrrw_do64(env
, csrno
, &old_value
,
3597 int128_getlo(new_value
),
3598 int128_getlo(write_mask
));
3599 if (ret
== RISCV_EXCP_NONE
&& ret_value
) {
3600 *ret_value
= int128_make64(old_value
);
3606 * Debugger support. If not in user mode, set env->debugger before the
3607 * riscv_csrrw call and clear it after the call.
3609 RISCVException
riscv_csrrw_debug(CPURISCVState
*env
, int csrno
,
3610 target_ulong
*ret_value
,
3611 target_ulong new_value
,
3612 target_ulong write_mask
)
3615 #if !defined(CONFIG_USER_ONLY)
3616 env
->debugger
= true;
3618 ret
= riscv_csrrw(env
, csrno
, ret_value
, new_value
, write_mask
);
3619 #if !defined(CONFIG_USER_ONLY)
3620 env
->debugger
= false;
3625 /* Control and Status Register function table */
3626 riscv_csr_operations csr_ops
[CSR_TABLE_SIZE
] = {
3627 /* User Floating-Point CSRs */
3628 [CSR_FFLAGS
] = { "fflags", fs
, read_fflags
, write_fflags
},
3629 [CSR_FRM
] = { "frm", fs
, read_frm
, write_frm
},
3630 [CSR_FCSR
] = { "fcsr", fs
, read_fcsr
, write_fcsr
},
3632 [CSR_VSTART
] = { "vstart", vs
, read_vstart
, write_vstart
,
3633 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3634 [CSR_VXSAT
] = { "vxsat", vs
, read_vxsat
, write_vxsat
,
3635 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3636 [CSR_VXRM
] = { "vxrm", vs
, read_vxrm
, write_vxrm
,
3637 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3638 [CSR_VCSR
] = { "vcsr", vs
, read_vcsr
, write_vcsr
,
3639 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3640 [CSR_VL
] = { "vl", vs
, read_vl
,
3641 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3642 [CSR_VTYPE
] = { "vtype", vs
, read_vtype
,
3643 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3644 [CSR_VLENB
] = { "vlenb", vs
, read_vlenb
,
3645 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3646 /* User Timers and Counters */
3647 [CSR_CYCLE
] = { "cycle", ctr
, read_hpmcounter
},
3648 [CSR_INSTRET
] = { "instret", ctr
, read_hpmcounter
},
3649 [CSR_CYCLEH
] = { "cycleh", ctr32
, read_hpmcounterh
},
3650 [CSR_INSTRETH
] = { "instreth", ctr32
, read_hpmcounterh
},
3653 * In privileged mode, the monitor will have to emulate TIME CSRs only if
3654 * rdtime callback is not provided by machine/platform emulation.
3656 [CSR_TIME
] = { "time", ctr
, read_time
},
3657 [CSR_TIMEH
] = { "timeh", ctr32
, read_timeh
},
3659 /* Crypto Extension */
3660 [CSR_SEED
] = { "seed", seed
, NULL
, NULL
, rmw_seed
},
3662 #if !defined(CONFIG_USER_ONLY)
3663 /* Machine Timers and Counters */
3664 [CSR_MCYCLE
] = { "mcycle", any
, read_hpmcounter
,
3665 write_mhpmcounter
},
3666 [CSR_MINSTRET
] = { "minstret", any
, read_hpmcounter
,
3667 write_mhpmcounter
},
3668 [CSR_MCYCLEH
] = { "mcycleh", any32
, read_hpmcounterh
,
3669 write_mhpmcounterh
},
3670 [CSR_MINSTRETH
] = { "minstreth", any32
, read_hpmcounterh
,
3671 write_mhpmcounterh
},
3673 /* Machine Information Registers */
3674 [CSR_MVENDORID
] = { "mvendorid", any
, read_mvendorid
},
3675 [CSR_MARCHID
] = { "marchid", any
, read_marchid
},
3676 [CSR_MIMPID
] = { "mimpid", any
, read_mimpid
},
3677 [CSR_MHARTID
] = { "mhartid", any
, read_mhartid
},
3679 [CSR_MCONFIGPTR
] = { "mconfigptr", any
, read_zero
,
3680 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3681 /* Machine Trap Setup */
3682 [CSR_MSTATUS
] = { "mstatus", any
, read_mstatus
, write_mstatus
,
3683 NULL
, read_mstatus_i128
},
3684 [CSR_MISA
] = { "misa", any
, read_misa
, write_misa
,
3685 NULL
, read_misa_i128
},
3686 [CSR_MIDELEG
] = { "mideleg", any
, NULL
, NULL
, rmw_mideleg
},
3687 [CSR_MEDELEG
] = { "medeleg", any
, read_medeleg
, write_medeleg
},
3688 [CSR_MIE
] = { "mie", any
, NULL
, NULL
, rmw_mie
},
3689 [CSR_MTVEC
] = { "mtvec", any
, read_mtvec
, write_mtvec
},
3690 [CSR_MCOUNTEREN
] = { "mcounteren", umode
, read_mcounteren
,
3693 [CSR_MSTATUSH
] = { "mstatush", any32
, read_mstatush
,
3696 /* Machine Trap Handling */
3697 [CSR_MSCRATCH
] = { "mscratch", any
, read_mscratch
, write_mscratch
,
3698 NULL
, read_mscratch_i128
, write_mscratch_i128
},
3699 [CSR_MEPC
] = { "mepc", any
, read_mepc
, write_mepc
},
3700 [CSR_MCAUSE
] = { "mcause", any
, read_mcause
, write_mcause
},
3701 [CSR_MTVAL
] = { "mtval", any
, read_mtval
, write_mtval
},
3702 [CSR_MIP
] = { "mip", any
, NULL
, NULL
, rmw_mip
},
3704 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
3705 [CSR_MISELECT
] = { "miselect", aia_any
, NULL
, NULL
, rmw_xiselect
},
3706 [CSR_MIREG
] = { "mireg", aia_any
, NULL
, NULL
, rmw_xireg
},
3708 /* Machine-Level Interrupts (AIA) */
3709 [CSR_MTOPEI
] = { "mtopei", aia_any
, NULL
, NULL
, rmw_xtopei
},
3710 [CSR_MTOPI
] = { "mtopi", aia_any
, read_mtopi
},
3712 /* Virtual Interrupts for Supervisor Level (AIA) */
3713 [CSR_MVIEN
] = { "mvien", aia_any
, read_zero
, write_ignore
},
3714 [CSR_MVIP
] = { "mvip", aia_any
, read_zero
, write_ignore
},
3716 /* Machine-Level High-Half CSRs (AIA) */
3717 [CSR_MIDELEGH
] = { "midelegh", aia_any32
, NULL
, NULL
, rmw_midelegh
},
3718 [CSR_MIEH
] = { "mieh", aia_any32
, NULL
, NULL
, rmw_mieh
},
3719 [CSR_MVIENH
] = { "mvienh", aia_any32
, read_zero
, write_ignore
},
3720 [CSR_MVIPH
] = { "mviph", aia_any32
, read_zero
, write_ignore
},
3721 [CSR_MIPH
] = { "miph", aia_any32
, NULL
, NULL
, rmw_miph
},
3723 /* Execution environment configuration */
3724 [CSR_MENVCFG
] = { "menvcfg", umode
, read_menvcfg
, write_menvcfg
,
3725 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3726 [CSR_MENVCFGH
] = { "menvcfgh", umode32
, read_menvcfgh
, write_menvcfgh
,
3727 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3728 [CSR_SENVCFG
] = { "senvcfg", smode
, read_senvcfg
, write_senvcfg
,
3729 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3730 [CSR_HENVCFG
] = { "henvcfg", hmode
, read_henvcfg
, write_henvcfg
,
3731 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3732 [CSR_HENVCFGH
] = { "henvcfgh", hmode32
, read_henvcfgh
, write_henvcfgh
,
3733 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3735 /* Supervisor Trap Setup */
3736 [CSR_SSTATUS
] = { "sstatus", smode
, read_sstatus
, write_sstatus
,
3737 NULL
, read_sstatus_i128
},
3738 [CSR_SIE
] = { "sie", smode
, NULL
, NULL
, rmw_sie
},
3739 [CSR_STVEC
] = { "stvec", smode
, read_stvec
, write_stvec
},
3740 [CSR_SCOUNTEREN
] = { "scounteren", smode
, read_scounteren
,
3743 /* Supervisor Trap Handling */
3744 [CSR_SSCRATCH
] = { "sscratch", smode
, read_sscratch
, write_sscratch
,
3745 NULL
, read_sscratch_i128
, write_sscratch_i128
},
3746 [CSR_SEPC
] = { "sepc", smode
, read_sepc
, write_sepc
},
3747 [CSR_SCAUSE
] = { "scause", smode
, read_scause
, write_scause
},
3748 [CSR_STVAL
] = { "stval", smode
, read_stval
, write_stval
},
3749 [CSR_SIP
] = { "sip", smode
, NULL
, NULL
, rmw_sip
},
3750 [CSR_STIMECMP
] = { "stimecmp", sstc
, read_stimecmp
, write_stimecmp
,
3751 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3752 [CSR_STIMECMPH
] = { "stimecmph", sstc_32
, read_stimecmph
, write_stimecmph
,
3753 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3754 [CSR_VSTIMECMP
] = { "vstimecmp", sstc
, read_vstimecmp
,
3756 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3757 [CSR_VSTIMECMPH
] = { "vstimecmph", sstc_32
, read_vstimecmph
,
3759 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3761 /* Supervisor Protection and Translation */
3762 [CSR_SATP
] = { "satp", smode
, read_satp
, write_satp
},
3764 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
3765 [CSR_SISELECT
] = { "siselect", aia_smode
, NULL
, NULL
, rmw_xiselect
},
3766 [CSR_SIREG
] = { "sireg", aia_smode
, NULL
, NULL
, rmw_xireg
},
3768 /* Supervisor-Level Interrupts (AIA) */
3769 [CSR_STOPEI
] = { "stopei", aia_smode
, NULL
, NULL
, rmw_xtopei
},
3770 [CSR_STOPI
] = { "stopi", aia_smode
, read_stopi
},
3772 /* Supervisor-Level High-Half CSRs (AIA) */
3773 [CSR_SIEH
] = { "sieh", aia_smode32
, NULL
, NULL
, rmw_sieh
},
3774 [CSR_SIPH
] = { "siph", aia_smode32
, NULL
, NULL
, rmw_siph
},
3776 [CSR_HSTATUS
] = { "hstatus", hmode
, read_hstatus
, write_hstatus
,
3777 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3778 [CSR_HEDELEG
] = { "hedeleg", hmode
, read_hedeleg
, write_hedeleg
,
3779 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3780 [CSR_HIDELEG
] = { "hideleg", hmode
, NULL
, NULL
, rmw_hideleg
,
3781 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3782 [CSR_HVIP
] = { "hvip", hmode
, NULL
, NULL
, rmw_hvip
,
3783 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3784 [CSR_HIP
] = { "hip", hmode
, NULL
, NULL
, rmw_hip
,
3785 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3786 [CSR_HIE
] = { "hie", hmode
, NULL
, NULL
, rmw_hie
,
3787 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3788 [CSR_HCOUNTEREN
] = { "hcounteren", hmode
, read_hcounteren
,
3790 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3791 [CSR_HGEIE
] = { "hgeie", hmode
, read_hgeie
, write_hgeie
,
3792 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3793 [CSR_HTVAL
] = { "htval", hmode
, read_htval
, write_htval
,
3794 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3795 [CSR_HTINST
] = { "htinst", hmode
, read_htinst
, write_htinst
,
3796 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3797 [CSR_HGEIP
] = { "hgeip", hmode
, read_hgeip
,
3798 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3799 [CSR_HGATP
] = { "hgatp", hmode
, read_hgatp
, write_hgatp
,
3800 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3801 [CSR_HTIMEDELTA
] = { "htimedelta", hmode
, read_htimedelta
,
3803 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3804 [CSR_HTIMEDELTAH
] = { "htimedeltah", hmode32
, read_htimedeltah
,
3806 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3808 [CSR_VSSTATUS
] = { "vsstatus", hmode
, read_vsstatus
,
3810 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3811 [CSR_VSIP
] = { "vsip", hmode
, NULL
, NULL
, rmw_vsip
,
3812 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3813 [CSR_VSIE
] = { "vsie", hmode
, NULL
, NULL
, rmw_vsie
,
3814 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3815 [CSR_VSTVEC
] = { "vstvec", hmode
, read_vstvec
, write_vstvec
,
3816 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3817 [CSR_VSSCRATCH
] = { "vsscratch", hmode
, read_vsscratch
,
3819 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3820 [CSR_VSEPC
] = { "vsepc", hmode
, read_vsepc
, write_vsepc
,
3821 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3822 [CSR_VSCAUSE
] = { "vscause", hmode
, read_vscause
, write_vscause
,
3823 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3824 [CSR_VSTVAL
] = { "vstval", hmode
, read_vstval
, write_vstval
,
3825 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3826 [CSR_VSATP
] = { "vsatp", hmode
, read_vsatp
, write_vsatp
,
3827 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3829 [CSR_MTVAL2
] = { "mtval2", hmode
, read_mtval2
, write_mtval2
,
3830 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3831 [CSR_MTINST
] = { "mtinst", hmode
, read_mtinst
, write_mtinst
,
3832 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3834 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
3835 [CSR_HVIEN
] = { "hvien", aia_hmode
, read_zero
, write_ignore
},
3836 [CSR_HVICTL
] = { "hvictl", aia_hmode
, read_hvictl
,
3838 [CSR_HVIPRIO1
] = { "hviprio1", aia_hmode
, read_hviprio1
,
3840 [CSR_HVIPRIO2
] = { "hviprio2", aia_hmode
, read_hviprio2
,
3844 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
3846 [CSR_VSISELECT
] = { "vsiselect", aia_hmode
, NULL
, NULL
,
3848 [CSR_VSIREG
] = { "vsireg", aia_hmode
, NULL
, NULL
, rmw_xireg
},
3850 /* VS-Level Interrupts (H-extension with AIA) */
3851 [CSR_VSTOPEI
] = { "vstopei", aia_hmode
, NULL
, NULL
, rmw_xtopei
},
3852 [CSR_VSTOPI
] = { "vstopi", aia_hmode
, read_vstopi
},
3854 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
3855 [CSR_HIDELEGH
] = { "hidelegh", aia_hmode32
, NULL
, NULL
,
3857 [CSR_HVIENH
] = { "hvienh", aia_hmode32
, read_zero
,
3859 [CSR_HVIPH
] = { "hviph", aia_hmode32
, NULL
, NULL
, rmw_hviph
},
3860 [CSR_HVIPRIO1H
] = { "hviprio1h", aia_hmode32
, read_hviprio1h
,
3862 [CSR_HVIPRIO2H
] = { "hviprio2h", aia_hmode32
, read_hviprio2h
,
3864 [CSR_VSIEH
] = { "vsieh", aia_hmode32
, NULL
, NULL
, rmw_vsieh
},
3865 [CSR_VSIPH
] = { "vsiph", aia_hmode32
, NULL
, NULL
, rmw_vsiph
},
3867 /* Physical Memory Protection */
3868 [CSR_MSECCFG
] = { "mseccfg", epmp
, read_mseccfg
, write_mseccfg
,
3869 .min_priv_ver
= PRIV_VERSION_1_11_0
},
3870 [CSR_PMPCFG0
] = { "pmpcfg0", pmp
, read_pmpcfg
, write_pmpcfg
},
3871 [CSR_PMPCFG1
] = { "pmpcfg1", pmp
, read_pmpcfg
, write_pmpcfg
},
3872 [CSR_PMPCFG2
] = { "pmpcfg2", pmp
, read_pmpcfg
, write_pmpcfg
},
3873 [CSR_PMPCFG3
] = { "pmpcfg3", pmp
, read_pmpcfg
, write_pmpcfg
},
3874 [CSR_PMPADDR0
] = { "pmpaddr0", pmp
, read_pmpaddr
, write_pmpaddr
},
3875 [CSR_PMPADDR1
] = { "pmpaddr1", pmp
, read_pmpaddr
, write_pmpaddr
},
3876 [CSR_PMPADDR2
] = { "pmpaddr2", pmp
, read_pmpaddr
, write_pmpaddr
},
3877 [CSR_PMPADDR3
] = { "pmpaddr3", pmp
, read_pmpaddr
, write_pmpaddr
},
3878 [CSR_PMPADDR4
] = { "pmpaddr4", pmp
, read_pmpaddr
, write_pmpaddr
},
3879 [CSR_PMPADDR5
] = { "pmpaddr5", pmp
, read_pmpaddr
, write_pmpaddr
},
3880 [CSR_PMPADDR6
] = { "pmpaddr6", pmp
, read_pmpaddr
, write_pmpaddr
},
3881 [CSR_PMPADDR7
] = { "pmpaddr7", pmp
, read_pmpaddr
, write_pmpaddr
},
3882 [CSR_PMPADDR8
] = { "pmpaddr8", pmp
, read_pmpaddr
, write_pmpaddr
},
3883 [CSR_PMPADDR9
] = { "pmpaddr9", pmp
, read_pmpaddr
, write_pmpaddr
},
3884 [CSR_PMPADDR10
] = { "pmpaddr10", pmp
, read_pmpaddr
, write_pmpaddr
},
3885 [CSR_PMPADDR11
] = { "pmpaddr11", pmp
, read_pmpaddr
, write_pmpaddr
},
3886 [CSR_PMPADDR12
] = { "pmpaddr12", pmp
, read_pmpaddr
, write_pmpaddr
},
3887 [CSR_PMPADDR13
] = { "pmpaddr13", pmp
, read_pmpaddr
, write_pmpaddr
},
3888 [CSR_PMPADDR14
] = { "pmpaddr14", pmp
, read_pmpaddr
, write_pmpaddr
},
3889 [CSR_PMPADDR15
] = { "pmpaddr15", pmp
, read_pmpaddr
, write_pmpaddr
},
3892 [CSR_TSELECT
] = { "tselect", debug
, read_tselect
, write_tselect
},
3893 [CSR_TDATA1
] = { "tdata1", debug
, read_tdata
, write_tdata
},
3894 [CSR_TDATA2
] = { "tdata2", debug
, read_tdata
, write_tdata
},
3895 [CSR_TDATA3
] = { "tdata3", debug
, read_tdata
, write_tdata
},
3897 /* User Pointer Masking */
3898 [CSR_UMTE
] = { "umte", pointer_masking
, read_umte
, write_umte
},
3899 [CSR_UPMMASK
] = { "upmmask", pointer_masking
, read_upmmask
,
3901 [CSR_UPMBASE
] = { "upmbase", pointer_masking
, read_upmbase
,
3903 /* Machine Pointer Masking */
3904 [CSR_MMTE
] = { "mmte", pointer_masking
, read_mmte
, write_mmte
},
3905 [CSR_MPMMASK
] = { "mpmmask", pointer_masking
, read_mpmmask
,
3907 [CSR_MPMBASE
] = { "mpmbase", pointer_masking
, read_mpmbase
,
3909 /* Supervisor Pointer Masking */
3910 [CSR_SMTE
] = { "smte", pointer_masking
, read_smte
, write_smte
},
3911 [CSR_SPMMASK
] = { "spmmask", pointer_masking
, read_spmmask
,
3913 [CSR_SPMBASE
] = { "spmbase", pointer_masking
, read_spmbase
,
3916 /* Performance Counters */
3917 [CSR_HPMCOUNTER3
] = { "hpmcounter3", ctr
, read_hpmcounter
},
3918 [CSR_HPMCOUNTER4
] = { "hpmcounter4", ctr
, read_hpmcounter
},
3919 [CSR_HPMCOUNTER5
] = { "hpmcounter5", ctr
, read_hpmcounter
},
3920 [CSR_HPMCOUNTER6
] = { "hpmcounter6", ctr
, read_hpmcounter
},
3921 [CSR_HPMCOUNTER7
] = { "hpmcounter7", ctr
, read_hpmcounter
},
3922 [CSR_HPMCOUNTER8
] = { "hpmcounter8", ctr
, read_hpmcounter
},
3923 [CSR_HPMCOUNTER9
] = { "hpmcounter9", ctr
, read_hpmcounter
},
3924 [CSR_HPMCOUNTER10
] = { "hpmcounter10", ctr
, read_hpmcounter
},
3925 [CSR_HPMCOUNTER11
] = { "hpmcounter11", ctr
, read_hpmcounter
},
3926 [CSR_HPMCOUNTER12
] = { "hpmcounter12", ctr
, read_hpmcounter
},
3927 [CSR_HPMCOUNTER13
] = { "hpmcounter13", ctr
, read_hpmcounter
},
3928 [CSR_HPMCOUNTER14
] = { "hpmcounter14", ctr
, read_hpmcounter
},
3929 [CSR_HPMCOUNTER15
] = { "hpmcounter15", ctr
, read_hpmcounter
},
3930 [CSR_HPMCOUNTER16
] = { "hpmcounter16", ctr
, read_hpmcounter
},
3931 [CSR_HPMCOUNTER17
] = { "hpmcounter17", ctr
, read_hpmcounter
},
3932 [CSR_HPMCOUNTER18
] = { "hpmcounter18", ctr
, read_hpmcounter
},
3933 [CSR_HPMCOUNTER19
] = { "hpmcounter19", ctr
, read_hpmcounter
},
3934 [CSR_HPMCOUNTER20
] = { "hpmcounter20", ctr
, read_hpmcounter
},
3935 [CSR_HPMCOUNTER21
] = { "hpmcounter21", ctr
, read_hpmcounter
},
3936 [CSR_HPMCOUNTER22
] = { "hpmcounter22", ctr
, read_hpmcounter
},
3937 [CSR_HPMCOUNTER23
] = { "hpmcounter23", ctr
, read_hpmcounter
},
3938 [CSR_HPMCOUNTER24
] = { "hpmcounter24", ctr
, read_hpmcounter
},
3939 [CSR_HPMCOUNTER25
] = { "hpmcounter25", ctr
, read_hpmcounter
},
3940 [CSR_HPMCOUNTER26
] = { "hpmcounter26", ctr
, read_hpmcounter
},
3941 [CSR_HPMCOUNTER27
] = { "hpmcounter27", ctr
, read_hpmcounter
},
3942 [CSR_HPMCOUNTER28
] = { "hpmcounter28", ctr
, read_hpmcounter
},
3943 [CSR_HPMCOUNTER29
] = { "hpmcounter29", ctr
, read_hpmcounter
},
3944 [CSR_HPMCOUNTER30
] = { "hpmcounter30", ctr
, read_hpmcounter
},
3945 [CSR_HPMCOUNTER31
] = { "hpmcounter31", ctr
, read_hpmcounter
},
3947 [CSR_MHPMCOUNTER3
] = { "mhpmcounter3", mctr
, read_hpmcounter
,
3948 write_mhpmcounter
},
3949 [CSR_MHPMCOUNTER4
] = { "mhpmcounter4", mctr
, read_hpmcounter
,
3950 write_mhpmcounter
},
3951 [CSR_MHPMCOUNTER5
] = { "mhpmcounter5", mctr
, read_hpmcounter
,
3952 write_mhpmcounter
},
3953 [CSR_MHPMCOUNTER6
] = { "mhpmcounter6", mctr
, read_hpmcounter
,
3954 write_mhpmcounter
},
3955 [CSR_MHPMCOUNTER7
] = { "mhpmcounter7", mctr
, read_hpmcounter
,
3956 write_mhpmcounter
},
3957 [CSR_MHPMCOUNTER8
] = { "mhpmcounter8", mctr
, read_hpmcounter
,
3958 write_mhpmcounter
},
3959 [CSR_MHPMCOUNTER9
] = { "mhpmcounter9", mctr
, read_hpmcounter
,
3960 write_mhpmcounter
},
3961 [CSR_MHPMCOUNTER10
] = { "mhpmcounter10", mctr
, read_hpmcounter
,
3962 write_mhpmcounter
},
3963 [CSR_MHPMCOUNTER11
] = { "mhpmcounter11", mctr
, read_hpmcounter
,
3964 write_mhpmcounter
},
3965 [CSR_MHPMCOUNTER12
] = { "mhpmcounter12", mctr
, read_hpmcounter
,
3966 write_mhpmcounter
},
3967 [CSR_MHPMCOUNTER13
] = { "mhpmcounter13", mctr
, read_hpmcounter
,
3968 write_mhpmcounter
},
3969 [CSR_MHPMCOUNTER14
] = { "mhpmcounter14", mctr
, read_hpmcounter
,
3970 write_mhpmcounter
},
3971 [CSR_MHPMCOUNTER15
] = { "mhpmcounter15", mctr
, read_hpmcounter
,
3972 write_mhpmcounter
},
3973 [CSR_MHPMCOUNTER16
] = { "mhpmcounter16", mctr
, read_hpmcounter
,
3974 write_mhpmcounter
},
3975 [CSR_MHPMCOUNTER17
] = { "mhpmcounter17", mctr
, read_hpmcounter
,
3976 write_mhpmcounter
},
3977 [CSR_MHPMCOUNTER18
] = { "mhpmcounter18", mctr
, read_hpmcounter
,
3978 write_mhpmcounter
},
3979 [CSR_MHPMCOUNTER19
] = { "mhpmcounter19", mctr
, read_hpmcounter
,
3980 write_mhpmcounter
},
3981 [CSR_MHPMCOUNTER20
] = { "mhpmcounter20", mctr
, read_hpmcounter
,
3982 write_mhpmcounter
},
3983 [CSR_MHPMCOUNTER21
] = { "mhpmcounter21", mctr
, read_hpmcounter
,
3984 write_mhpmcounter
},
3985 [CSR_MHPMCOUNTER22
] = { "mhpmcounter22", mctr
, read_hpmcounter
,
3986 write_mhpmcounter
},
3987 [CSR_MHPMCOUNTER23
] = { "mhpmcounter23", mctr
, read_hpmcounter
,
3988 write_mhpmcounter
},
3989 [CSR_MHPMCOUNTER24
] = { "mhpmcounter24", mctr
, read_hpmcounter
,
3990 write_mhpmcounter
},
3991 [CSR_MHPMCOUNTER25
] = { "mhpmcounter25", mctr
, read_hpmcounter
,
3992 write_mhpmcounter
},
3993 [CSR_MHPMCOUNTER26
] = { "mhpmcounter26", mctr
, read_hpmcounter
,
3994 write_mhpmcounter
},
3995 [CSR_MHPMCOUNTER27
] = { "mhpmcounter27", mctr
, read_hpmcounter
,
3996 write_mhpmcounter
},
3997 [CSR_MHPMCOUNTER28
] = { "mhpmcounter28", mctr
, read_hpmcounter
,
3998 write_mhpmcounter
},
3999 [CSR_MHPMCOUNTER29
] = { "mhpmcounter29", mctr
, read_hpmcounter
,
4000 write_mhpmcounter
},
4001 [CSR_MHPMCOUNTER30
] = { "mhpmcounter30", mctr
, read_hpmcounter
,
4002 write_mhpmcounter
},
4003 [CSR_MHPMCOUNTER31
] = { "mhpmcounter31", mctr
, read_hpmcounter
,
4004 write_mhpmcounter
},
4006 [CSR_MCOUNTINHIBIT
] = { "mcountinhibit", any
, read_mcountinhibit
,
4007 write_mcountinhibit
,
4008 .min_priv_ver
= PRIV_VERSION_1_11_0
},
4010 [CSR_MHPMEVENT3
] = { "mhpmevent3", any
, read_mhpmevent
,
4012 [CSR_MHPMEVENT4
] = { "mhpmevent4", any
, read_mhpmevent
,
4014 [CSR_MHPMEVENT5
] = { "mhpmevent5", any
, read_mhpmevent
,
4016 [CSR_MHPMEVENT6
] = { "mhpmevent6", any
, read_mhpmevent
,
4018 [CSR_MHPMEVENT7
] = { "mhpmevent7", any
, read_mhpmevent
,
4020 [CSR_MHPMEVENT8
] = { "mhpmevent8", any
, read_mhpmevent
,
4022 [CSR_MHPMEVENT9
] = { "mhpmevent9", any
, read_mhpmevent
,
4024 [CSR_MHPMEVENT10
] = { "mhpmevent10", any
, read_mhpmevent
,
4026 [CSR_MHPMEVENT11
] = { "mhpmevent11", any
, read_mhpmevent
,
4028 [CSR_MHPMEVENT12
] = { "mhpmevent12", any
, read_mhpmevent
,
4030 [CSR_MHPMEVENT13
] = { "mhpmevent13", any
, read_mhpmevent
,
4032 [CSR_MHPMEVENT14
] = { "mhpmevent14", any
, read_mhpmevent
,
4034 [CSR_MHPMEVENT15
] = { "mhpmevent15", any
, read_mhpmevent
,
4036 [CSR_MHPMEVENT16
] = { "mhpmevent16", any
, read_mhpmevent
,
4038 [CSR_MHPMEVENT17
] = { "mhpmevent17", any
, read_mhpmevent
,
4040 [CSR_MHPMEVENT18
] = { "mhpmevent18", any
, read_mhpmevent
,
4042 [CSR_MHPMEVENT19
] = { "mhpmevent19", any
, read_mhpmevent
,
4044 [CSR_MHPMEVENT20
] = { "mhpmevent20", any
, read_mhpmevent
,
4046 [CSR_MHPMEVENT21
] = { "mhpmevent21", any
, read_mhpmevent
,
4048 [CSR_MHPMEVENT22
] = { "mhpmevent22", any
, read_mhpmevent
,
4050 [CSR_MHPMEVENT23
] = { "mhpmevent23", any
, read_mhpmevent
,
4052 [CSR_MHPMEVENT24
] = { "mhpmevent24", any
, read_mhpmevent
,
4054 [CSR_MHPMEVENT25
] = { "mhpmevent25", any
, read_mhpmevent
,
4056 [CSR_MHPMEVENT26
] = { "mhpmevent26", any
, read_mhpmevent
,
4058 [CSR_MHPMEVENT27
] = { "mhpmevent27", any
, read_mhpmevent
,
4060 [CSR_MHPMEVENT28
] = { "mhpmevent28", any
, read_mhpmevent
,
4062 [CSR_MHPMEVENT29
] = { "mhpmevent29", any
, read_mhpmevent
,
4064 [CSR_MHPMEVENT30
] = { "mhpmevent30", any
, read_mhpmevent
,
4066 [CSR_MHPMEVENT31
] = { "mhpmevent31", any
, read_mhpmevent
,
4069 [CSR_MHPMEVENT3H
] = { "mhpmevent3h", sscofpmf
, read_mhpmeventh
,
4071 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4072 [CSR_MHPMEVENT4H
] = { "mhpmevent4h", sscofpmf
, read_mhpmeventh
,
4074 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4075 [CSR_MHPMEVENT5H
] = { "mhpmevent5h", sscofpmf
, read_mhpmeventh
,
4077 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4078 [CSR_MHPMEVENT6H
] = { "mhpmevent6h", sscofpmf
, read_mhpmeventh
,
4080 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4081 [CSR_MHPMEVENT7H
] = { "mhpmevent7h", sscofpmf
, read_mhpmeventh
,
4083 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4084 [CSR_MHPMEVENT8H
] = { "mhpmevent8h", sscofpmf
, read_mhpmeventh
,
4086 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4087 [CSR_MHPMEVENT9H
] = { "mhpmevent9h", sscofpmf
, read_mhpmeventh
,
4089 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4090 [CSR_MHPMEVENT10H
] = { "mhpmevent10h", sscofpmf
, read_mhpmeventh
,
4092 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4093 [CSR_MHPMEVENT11H
] = { "mhpmevent11h", sscofpmf
, read_mhpmeventh
,
4095 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4096 [CSR_MHPMEVENT12H
] = { "mhpmevent12h", sscofpmf
, read_mhpmeventh
,
4098 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4099 [CSR_MHPMEVENT13H
] = { "mhpmevent13h", sscofpmf
, read_mhpmeventh
,
4101 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4102 [CSR_MHPMEVENT14H
] = { "mhpmevent14h", sscofpmf
, read_mhpmeventh
,
4104 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4105 [CSR_MHPMEVENT15H
] = { "mhpmevent15h", sscofpmf
, read_mhpmeventh
,
4107 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4108 [CSR_MHPMEVENT16H
] = { "mhpmevent16h", sscofpmf
, read_mhpmeventh
,
4110 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4111 [CSR_MHPMEVENT17H
] = { "mhpmevent17h", sscofpmf
, read_mhpmeventh
,
4113 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4114 [CSR_MHPMEVENT18H
] = { "mhpmevent18h", sscofpmf
, read_mhpmeventh
,
4116 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4117 [CSR_MHPMEVENT19H
] = { "mhpmevent19h", sscofpmf
, read_mhpmeventh
,
4119 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4120 [CSR_MHPMEVENT20H
] = { "mhpmevent20h", sscofpmf
, read_mhpmeventh
,
4122 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4123 [CSR_MHPMEVENT21H
] = { "mhpmevent21h", sscofpmf
, read_mhpmeventh
,
4125 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4126 [CSR_MHPMEVENT22H
] = { "mhpmevent22h", sscofpmf
, read_mhpmeventh
,
4128 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4129 [CSR_MHPMEVENT23H
] = { "mhpmevent23h", sscofpmf
, read_mhpmeventh
,
4131 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4132 [CSR_MHPMEVENT24H
] = { "mhpmevent24h", sscofpmf
, read_mhpmeventh
,
4134 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4135 [CSR_MHPMEVENT25H
] = { "mhpmevent25h", sscofpmf
, read_mhpmeventh
,
4137 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4138 [CSR_MHPMEVENT26H
] = { "mhpmevent26h", sscofpmf
, read_mhpmeventh
,
4140 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4141 [CSR_MHPMEVENT27H
] = { "mhpmevent27h", sscofpmf
, read_mhpmeventh
,
4143 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4144 [CSR_MHPMEVENT28H
] = { "mhpmevent28h", sscofpmf
, read_mhpmeventh
,
4146 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4147 [CSR_MHPMEVENT29H
] = { "mhpmevent29h", sscofpmf
, read_mhpmeventh
,
4149 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4150 [CSR_MHPMEVENT30H
] = { "mhpmevent30h", sscofpmf
, read_mhpmeventh
,
4152 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4153 [CSR_MHPMEVENT31H
] = { "mhpmevent31h", sscofpmf
, read_mhpmeventh
,
4155 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4157 [CSR_HPMCOUNTER3H
] = { "hpmcounter3h", ctr32
, read_hpmcounterh
},
4158 [CSR_HPMCOUNTER4H
] = { "hpmcounter4h", ctr32
, read_hpmcounterh
},
4159 [CSR_HPMCOUNTER5H
] = { "hpmcounter5h", ctr32
, read_hpmcounterh
},
4160 [CSR_HPMCOUNTER6H
] = { "hpmcounter6h", ctr32
, read_hpmcounterh
},
4161 [CSR_HPMCOUNTER7H
] = { "hpmcounter7h", ctr32
, read_hpmcounterh
},
4162 [CSR_HPMCOUNTER8H
] = { "hpmcounter8h", ctr32
, read_hpmcounterh
},
4163 [CSR_HPMCOUNTER9H
] = { "hpmcounter9h", ctr32
, read_hpmcounterh
},
4164 [CSR_HPMCOUNTER10H
] = { "hpmcounter10h", ctr32
, read_hpmcounterh
},
4165 [CSR_HPMCOUNTER11H
] = { "hpmcounter11h", ctr32
, read_hpmcounterh
},
4166 [CSR_HPMCOUNTER12H
] = { "hpmcounter12h", ctr32
, read_hpmcounterh
},
4167 [CSR_HPMCOUNTER13H
] = { "hpmcounter13h", ctr32
, read_hpmcounterh
},
4168 [CSR_HPMCOUNTER14H
] = { "hpmcounter14h", ctr32
, read_hpmcounterh
},
4169 [CSR_HPMCOUNTER15H
] = { "hpmcounter15h", ctr32
, read_hpmcounterh
},
4170 [CSR_HPMCOUNTER16H
] = { "hpmcounter16h", ctr32
, read_hpmcounterh
},
4171 [CSR_HPMCOUNTER17H
] = { "hpmcounter17h", ctr32
, read_hpmcounterh
},
4172 [CSR_HPMCOUNTER18H
] = { "hpmcounter18h", ctr32
, read_hpmcounterh
},
4173 [CSR_HPMCOUNTER19H
] = { "hpmcounter19h", ctr32
, read_hpmcounterh
},
4174 [CSR_HPMCOUNTER20H
] = { "hpmcounter20h", ctr32
, read_hpmcounterh
},
4175 [CSR_HPMCOUNTER21H
] = { "hpmcounter21h", ctr32
, read_hpmcounterh
},
4176 [CSR_HPMCOUNTER22H
] = { "hpmcounter22h", ctr32
, read_hpmcounterh
},
4177 [CSR_HPMCOUNTER23H
] = { "hpmcounter23h", ctr32
, read_hpmcounterh
},
4178 [CSR_HPMCOUNTER24H
] = { "hpmcounter24h", ctr32
, read_hpmcounterh
},
4179 [CSR_HPMCOUNTER25H
] = { "hpmcounter25h", ctr32
, read_hpmcounterh
},
4180 [CSR_HPMCOUNTER26H
] = { "hpmcounter26h", ctr32
, read_hpmcounterh
},
4181 [CSR_HPMCOUNTER27H
] = { "hpmcounter27h", ctr32
, read_hpmcounterh
},
4182 [CSR_HPMCOUNTER28H
] = { "hpmcounter28h", ctr32
, read_hpmcounterh
},
4183 [CSR_HPMCOUNTER29H
] = { "hpmcounter29h", ctr32
, read_hpmcounterh
},
4184 [CSR_HPMCOUNTER30H
] = { "hpmcounter30h", ctr32
, read_hpmcounterh
},
4185 [CSR_HPMCOUNTER31H
] = { "hpmcounter31h", ctr32
, read_hpmcounterh
},
4187 [CSR_MHPMCOUNTER3H
] = { "mhpmcounter3h", mctr32
, read_hpmcounterh
,
4188 write_mhpmcounterh
},
4189 [CSR_MHPMCOUNTER4H
] = { "mhpmcounter4h", mctr32
, read_hpmcounterh
,
4190 write_mhpmcounterh
},
4191 [CSR_MHPMCOUNTER5H
] = { "mhpmcounter5h", mctr32
, read_hpmcounterh
,
4192 write_mhpmcounterh
},
4193 [CSR_MHPMCOUNTER6H
] = { "mhpmcounter6h", mctr32
, read_hpmcounterh
,
4194 write_mhpmcounterh
},
4195 [CSR_MHPMCOUNTER7H
] = { "mhpmcounter7h", mctr32
, read_hpmcounterh
,
4196 write_mhpmcounterh
},
4197 [CSR_MHPMCOUNTER8H
] = { "mhpmcounter8h", mctr32
, read_hpmcounterh
,
4198 write_mhpmcounterh
},
4199 [CSR_MHPMCOUNTER9H
] = { "mhpmcounter9h", mctr32
, read_hpmcounterh
,
4200 write_mhpmcounterh
},
4201 [CSR_MHPMCOUNTER10H
] = { "mhpmcounter10h", mctr32
, read_hpmcounterh
,
4202 write_mhpmcounterh
},
4203 [CSR_MHPMCOUNTER11H
] = { "mhpmcounter11h", mctr32
, read_hpmcounterh
,
4204 write_mhpmcounterh
},
4205 [CSR_MHPMCOUNTER12H
] = { "mhpmcounter12h", mctr32
, read_hpmcounterh
,
4206 write_mhpmcounterh
},
4207 [CSR_MHPMCOUNTER13H
] = { "mhpmcounter13h", mctr32
, read_hpmcounterh
,
4208 write_mhpmcounterh
},
4209 [CSR_MHPMCOUNTER14H
] = { "mhpmcounter14h", mctr32
, read_hpmcounterh
,
4210 write_mhpmcounterh
},
4211 [CSR_MHPMCOUNTER15H
] = { "mhpmcounter15h", mctr32
, read_hpmcounterh
,
4212 write_mhpmcounterh
},
4213 [CSR_MHPMCOUNTER16H
] = { "mhpmcounter16h", mctr32
, read_hpmcounterh
,
4214 write_mhpmcounterh
},
4215 [CSR_MHPMCOUNTER17H
] = { "mhpmcounter17h", mctr32
, read_hpmcounterh
,
4216 write_mhpmcounterh
},
4217 [CSR_MHPMCOUNTER18H
] = { "mhpmcounter18h", mctr32
, read_hpmcounterh
,
4218 write_mhpmcounterh
},
4219 [CSR_MHPMCOUNTER19H
] = { "mhpmcounter19h", mctr32
, read_hpmcounterh
,
4220 write_mhpmcounterh
},
4221 [CSR_MHPMCOUNTER20H
] = { "mhpmcounter20h", mctr32
, read_hpmcounterh
,
4222 write_mhpmcounterh
},
4223 [CSR_MHPMCOUNTER21H
] = { "mhpmcounter21h", mctr32
, read_hpmcounterh
,
4224 write_mhpmcounterh
},
4225 [CSR_MHPMCOUNTER22H
] = { "mhpmcounter22h", mctr32
, read_hpmcounterh
,
4226 write_mhpmcounterh
},
4227 [CSR_MHPMCOUNTER23H
] = { "mhpmcounter23h", mctr32
, read_hpmcounterh
,
4228 write_mhpmcounterh
},
4229 [CSR_MHPMCOUNTER24H
] = { "mhpmcounter24h", mctr32
, read_hpmcounterh
,
4230 write_mhpmcounterh
},
4231 [CSR_MHPMCOUNTER25H
] = { "mhpmcounter25h", mctr32
, read_hpmcounterh
,
4232 write_mhpmcounterh
},
4233 [CSR_MHPMCOUNTER26H
] = { "mhpmcounter26h", mctr32
, read_hpmcounterh
,
4234 write_mhpmcounterh
},
4235 [CSR_MHPMCOUNTER27H
] = { "mhpmcounter27h", mctr32
, read_hpmcounterh
,
4236 write_mhpmcounterh
},
4237 [CSR_MHPMCOUNTER28H
] = { "mhpmcounter28h", mctr32
, read_hpmcounterh
,
4238 write_mhpmcounterh
},
4239 [CSR_MHPMCOUNTER29H
] = { "mhpmcounter29h", mctr32
, read_hpmcounterh
,
4240 write_mhpmcounterh
},
4241 [CSR_MHPMCOUNTER30H
] = { "mhpmcounter30h", mctr32
, read_hpmcounterh
,
4242 write_mhpmcounterh
},
4243 [CSR_MHPMCOUNTER31H
] = { "mhpmcounter31h", mctr32
, read_hpmcounterh
,
4244 write_mhpmcounterh
},
4245 [CSR_SCOUNTOVF
] = { "scountovf", sscofpmf
, read_scountovf
,
4246 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4248 #endif /* !CONFIG_USER_ONLY */