target/riscv: Fix the relationship between menvcfg.PBMTE/STCE and Svpbmt/Sstc extensions
mirror_qemu.git: target/riscv/csr.c
1 /*
2 * RISC-V Control and Status Registers.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "cpu.h"
24 #include "pmu.h"
25 #include "time_helper.h"
26 #include "qemu/main-loop.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/cpu-timers.h"
29 #include "qemu/guest-random.h"
30 #include "qapi/error.h"
31
32 /* CSR function table public API */
33 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
34 {
35 *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
36 }
37
38 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
39 {
40 csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
41 }
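/*
 * Illustrative sketch, not part of this file: the two helpers above let
 * machine or vendor code swap a CSR's handlers at run time. The names
 * read_custom_mimpid()/install_custom_mimpid() below are hypothetical,
 * and the sketch assumes riscv_csr_operations exposes a 'read' callback;
 * only riscv_get_csr_ops()/riscv_set_csr_ops() and CSR_MIMPID are taken
 * from the surrounding code.
 */
#if 0
static RISCVException read_custom_mimpid(CPURISCVState *env, int csrno,
                                          target_ulong *val)
{
    *val = 0x1234;                       /* report a custom impl. ID */
    return RISCV_EXCP_NONE;
}

static void install_custom_mimpid(void)
{
    riscv_csr_operations ops;

    riscv_get_csr_ops(CSR_MIMPID, &ops); /* copy the default handlers */
    ops.read = read_custom_mimpid;       /* override only the reader  */
    riscv_set_csr_ops(CSR_MIMPID, &ops);
}
#endif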
42
43 /* Predicates */
44 #if !defined(CONFIG_USER_ONLY)
45 static RISCVException smstateen_acc_ok(CPURISCVState *env, int index,
46 uint64_t bit)
47 {
48 bool virt = riscv_cpu_virt_enabled(env);
49 CPUState *cs = env_cpu(env);
50 RISCVCPU *cpu = RISCV_CPU(cs);
51
52 if (env->priv == PRV_M || !cpu->cfg.ext_smstateen) {
53 return RISCV_EXCP_NONE;
54 }
55
56 if (!(env->mstateen[index] & bit)) {
57 return RISCV_EXCP_ILLEGAL_INST;
58 }
59
60 if (virt) {
61 if (!(env->hstateen[index] & bit)) {
62 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
63 }
64
65 if (env->priv == PRV_U && !(env->sstateen[index] & bit)) {
66 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
67 }
68 }
69
70 if (env->priv == PRV_U && riscv_has_ext(env, RVS)) {
71 if (!(env->sstateen[index] & bit)) {
72 return RISCV_EXCP_ILLEGAL_INST;
73 }
74 }
75
76 return RISCV_EXCP_NONE;
77 }
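/*
 * Summary of the cascade above: M-mode and harts without Smstateen skip
 * the checks entirely. Otherwise, a bit missing in mstateen raises an
 * illegal instruction; a bit missing in hstateen (when V=1) raises a
 * virtual instruction fault; and a bit missing in sstateen raises an
 * illegal instruction for U-mode or a virtual instruction fault for
 * VU-mode.
 */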
78 #endif
79
80 static RISCVException fs(CPURISCVState *env, int csrno)
81 {
82 #if !defined(CONFIG_USER_ONLY)
83 if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
84 !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
85 return RISCV_EXCP_ILLEGAL_INST;
86 }
87 #endif
88 return RISCV_EXCP_NONE;
89 }
90
91 static RISCVException vs(CPURISCVState *env, int csrno)
92 {
93 CPUState *cs = env_cpu(env);
94 RISCVCPU *cpu = RISCV_CPU(cs);
95
96 if (env->misa_ext & RVV ||
97 cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
98 #if !defined(CONFIG_USER_ONLY)
99 if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
100 return RISCV_EXCP_ILLEGAL_INST;
101 }
102 #endif
103 return RISCV_EXCP_NONE;
104 }
105 return RISCV_EXCP_ILLEGAL_INST;
106 }
107
108 static RISCVException ctr(CPURISCVState *env, int csrno)
109 {
110 #if !defined(CONFIG_USER_ONLY)
111 CPUState *cs = env_cpu(env);
112 RISCVCPU *cpu = RISCV_CPU(cs);
113 int ctr_index;
114 target_ulong ctr_mask;
115 int base_csrno = CSR_CYCLE;
116 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
117
118 if (rv32 && csrno >= CSR_CYCLEH) {
119 /* Offset for RV32 hpmcounternh counters */
120 base_csrno += 0x80;
121 }
122 ctr_index = csrno - base_csrno;
123 ctr_mask = BIT(ctr_index);
124
125 if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
126 (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
127 goto skip_ext_pmu_check;
128 }
129
130 if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
131 /* No counter is enabled in PMU or the counter is out of range */
132 return RISCV_EXCP_ILLEGAL_INST;
133 }
134
135 skip_ext_pmu_check:
136
137 if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
138 return RISCV_EXCP_ILLEGAL_INST;
139 }
140
141 if (riscv_cpu_virt_enabled(env)) {
142 if (!get_field(env->hcounteren, ctr_mask) ||
143 (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) {
144 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
145 }
146 }
147
148 if (riscv_has_ext(env, RVS) && env->priv == PRV_U &&
149 !get_field(env->scounteren, ctr_mask)) {
150 return RISCV_EXCP_ILLEGAL_INST;
151 }
152
153 #endif
154 return RISCV_EXCP_NONE;
155 }
156
157 static RISCVException ctr32(CPURISCVState *env, int csrno)
158 {
159 if (riscv_cpu_mxl(env) != MXL_RV32) {
160 return RISCV_EXCP_ILLEGAL_INST;
161 }
162
163 return ctr(env, csrno);
164 }
165
166 #if !defined(CONFIG_USER_ONLY)
167 static RISCVException mctr(CPURISCVState *env, int csrno)
168 {
169 CPUState *cs = env_cpu(env);
170 RISCVCPU *cpu = RISCV_CPU(cs);
171 int ctr_index;
172 int base_csrno = CSR_MHPMCOUNTER3;
173
174 if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
175 /* Offset for RV32 mhpmcounternh counters */
176 base_csrno += 0x80;
177 }
178 ctr_index = csrno - base_csrno;
179 if (!cpu->cfg.pmu_num || ctr_index >= cpu->cfg.pmu_num) {
180 /* The PMU is not enabled or the counter is out of range */
181 return RISCV_EXCP_ILLEGAL_INST;
182 }
183
184 return RISCV_EXCP_NONE;
185 }
186
187 static RISCVException mctr32(CPURISCVState *env, int csrno)
188 {
189 if (riscv_cpu_mxl(env) != MXL_RV32) {
190 return RISCV_EXCP_ILLEGAL_INST;
191 }
192
193 return mctr(env, csrno);
194 }
195
196 static RISCVException sscofpmf(CPURISCVState *env, int csrno)
197 {
198 CPUState *cs = env_cpu(env);
199 RISCVCPU *cpu = RISCV_CPU(cs);
200
201 if (!cpu->cfg.ext_sscofpmf) {
202 return RISCV_EXCP_ILLEGAL_INST;
203 }
204
205 return RISCV_EXCP_NONE;
206 }
207
208 static RISCVException any(CPURISCVState *env, int csrno)
209 {
210 return RISCV_EXCP_NONE;
211 }
212
213 static RISCVException any32(CPURISCVState *env, int csrno)
214 {
215 if (riscv_cpu_mxl(env) != MXL_RV32) {
216 return RISCV_EXCP_ILLEGAL_INST;
217 }
218
219 return any(env, csrno);
220
221 }
222
223 static int aia_any(CPURISCVState *env, int csrno)
224 {
225 RISCVCPU *cpu = env_archcpu(env);
226
227 if (!cpu->cfg.ext_smaia) {
228 return RISCV_EXCP_ILLEGAL_INST;
229 }
230
231 return any(env, csrno);
232 }
233
234 static int aia_any32(CPURISCVState *env, int csrno)
235 {
236 RISCVCPU *cpu = env_archcpu(env);
237
238 if (!cpu->cfg.ext_smaia) {
239 return RISCV_EXCP_ILLEGAL_INST;
240 }
241
242 return any32(env, csrno);
243 }
244
245 static RISCVException smode(CPURISCVState *env, int csrno)
246 {
247 if (riscv_has_ext(env, RVS)) {
248 return RISCV_EXCP_NONE;
249 }
250
251 return RISCV_EXCP_ILLEGAL_INST;
252 }
253
254 static int smode32(CPURISCVState *env, int csrno)
255 {
256 if (riscv_cpu_mxl(env) != MXL_RV32) {
257 return RISCV_EXCP_ILLEGAL_INST;
258 }
259
260 return smode(env, csrno);
261 }
262
263 static int aia_smode(CPURISCVState *env, int csrno)
264 {
265 RISCVCPU *cpu = env_archcpu(env);
266
267 if (!cpu->cfg.ext_ssaia) {
268 return RISCV_EXCP_ILLEGAL_INST;
269 }
270
271 return smode(env, csrno);
272 }
273
274 static int aia_smode32(CPURISCVState *env, int csrno)
275 {
276 RISCVCPU *cpu = env_archcpu(env);
277
278 if (!cpu->cfg.ext_ssaia) {
279 return RISCV_EXCP_ILLEGAL_INST;
280 }
281
282 return smode32(env, csrno);
283 }
284
285 static RISCVException hmode(CPURISCVState *env, int csrno)
286 {
287 if (riscv_has_ext(env, RVH)) {
288 return RISCV_EXCP_NONE;
289 }
290
291 return RISCV_EXCP_ILLEGAL_INST;
292 }
293
294 static RISCVException hmode32(CPURISCVState *env, int csrno)
295 {
296 if (riscv_cpu_mxl(env) != MXL_RV32) {
297 return RISCV_EXCP_ILLEGAL_INST;
298 }
299
300 return hmode(env, csrno);
301
302 }
303
304 static RISCVException umode(CPURISCVState *env, int csrno)
305 {
306 if (riscv_has_ext(env, RVU)) {
307 return RISCV_EXCP_NONE;
308 }
309
310 return RISCV_EXCP_ILLEGAL_INST;
311 }
312
313 static RISCVException umode32(CPURISCVState *env, int csrno)
314 {
315 if (riscv_cpu_mxl(env) != MXL_RV32) {
316 return RISCV_EXCP_ILLEGAL_INST;
317 }
318
319 return umode(env, csrno);
320 }
321
322 static RISCVException mstateen(CPURISCVState *env, int csrno)
323 {
324 CPUState *cs = env_cpu(env);
325 RISCVCPU *cpu = RISCV_CPU(cs);
326
327 if (!cpu->cfg.ext_smstateen) {
328 return RISCV_EXCP_ILLEGAL_INST;
329 }
330
331 return any(env, csrno);
332 }
333
334 static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base)
335 {
336 CPUState *cs = env_cpu(env);
337 RISCVCPU *cpu = RISCV_CPU(cs);
338
339 if (!cpu->cfg.ext_smstateen) {
340 return RISCV_EXCP_ILLEGAL_INST;
341 }
342
343 if (env->priv < PRV_M) {
344 if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) {
345 return RISCV_EXCP_ILLEGAL_INST;
346 }
347 }
348
349 return hmode(env, csrno);
350 }
351
352 static RISCVException hstateen(CPURISCVState *env, int csrno)
353 {
354 return hstateen_pred(env, csrno, CSR_HSTATEEN0);
355 }
356
357 static RISCVException hstateenh(CPURISCVState *env, int csrno)
358 {
359 return hstateen_pred(env, csrno, CSR_HSTATEEN0H);
360 }
361
362 static RISCVException sstateen(CPURISCVState *env, int csrno)
363 {
364 bool virt = riscv_cpu_virt_enabled(env);
365 int index = csrno - CSR_SSTATEEN0;
366 CPUState *cs = env_cpu(env);
367 RISCVCPU *cpu = RISCV_CPU(cs);
368
369 if (!cpu->cfg.ext_smstateen) {
370 return RISCV_EXCP_ILLEGAL_INST;
371 }
372
373 if (env->priv < PRV_M) {
374 if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) {
375 return RISCV_EXCP_ILLEGAL_INST;
376 }
377
378 if (virt) {
379 if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) {
380 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
381 }
382 }
383 }
384
385 return smode(env, csrno);
386 }
387
388 /* Checks if PointerMasking registers could be accessed */
389 static RISCVException pointer_masking(CPURISCVState *env, int csrno)
390 {
391 /* Check if j-ext is present */
392 if (riscv_has_ext(env, RVJ)) {
393 return RISCV_EXCP_NONE;
394 }
395 return RISCV_EXCP_ILLEGAL_INST;
396 }
397
398 static int aia_hmode(CPURISCVState *env, int csrno)
399 {
400 RISCVCPU *cpu = env_archcpu(env);
401
402 if (!cpu->cfg.ext_ssaia) {
403 return RISCV_EXCP_ILLEGAL_INST;
404 }
405
406 return hmode(env, csrno);
407 }
408
409 static int aia_hmode32(CPURISCVState *env, int csrno)
410 {
411 RISCVCPU *cpu = env_archcpu(env);
412
413 if (!cpu->cfg.ext_ssaia) {
414 return RISCV_EXCP_ILLEGAL_INST;
415 }
416
417 return hmode32(env, csrno);
418 }
419
420 static RISCVException pmp(CPURISCVState *env, int csrno)
421 {
422 if (riscv_feature(env, RISCV_FEATURE_PMP)) {
423 return RISCV_EXCP_NONE;
424 }
425
426 return RISCV_EXCP_ILLEGAL_INST;
427 }
428
429 static RISCVException epmp(CPURISCVState *env, int csrno)
430 {
431 if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) {
432 return RISCV_EXCP_NONE;
433 }
434
435 return RISCV_EXCP_ILLEGAL_INST;
436 }
437
438 static RISCVException debug(CPURISCVState *env, int csrno)
439 {
440 if (riscv_feature(env, RISCV_FEATURE_DEBUG)) {
441 return RISCV_EXCP_NONE;
442 }
443
444 return RISCV_EXCP_ILLEGAL_INST;
445 }
446 #endif
447
448 static RISCVException seed(CPURISCVState *env, int csrno)
449 {
450 RISCVCPU *cpu = env_archcpu(env);
451
452 if (!cpu->cfg.ext_zkr) {
453 return RISCV_EXCP_ILLEGAL_INST;
454 }
455
456 #if !defined(CONFIG_USER_ONLY)
457 /*
458 * With a CSR read-write instruction:
459 * 1) The seed CSR is always available in machine mode as normal.
460 * 2) Attempted access to seed from virtual modes VS and VU always raises
461 * an exception (a virtual instruction exception only if mseccfg.sseed=1).
462 * 3) Without the corresponding access control bit set to 1, any attempted
463 * access to seed from U, S or HS modes will raise an illegal instruction
464 * exception.
465 */
466 if (env->priv == PRV_M) {
467 return RISCV_EXCP_NONE;
468 } else if (riscv_cpu_virt_enabled(env)) {
469 if (env->mseccfg & MSECCFG_SSEED) {
470 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
471 } else {
472 return RISCV_EXCP_ILLEGAL_INST;
473 }
474 } else {
475 if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
476 return RISCV_EXCP_NONE;
477 } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
478 return RISCV_EXCP_NONE;
479 } else {
480 return RISCV_EXCP_ILLEGAL_INST;
481 }
482 }
483 #else
484 return RISCV_EXCP_NONE;
485 #endif
486 }
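/*
 * Summary of the access rules implemented above (system-mode builds only):
 *
 *   M-mode                        -> access allowed
 *   VS/VU-mode, mseccfg.SSEED=1   -> virtual instruction fault
 *   VS/VU-mode, mseccfg.SSEED=0   -> illegal instruction
 *   HS/S-mode,  mseccfg.SSEED=1   -> access allowed
 *   U-mode,     mseccfg.USEED=1   -> access allowed
 *   anything else                 -> illegal instruction
 */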
487
488 /* User Floating-Point CSRs */
489 static RISCVException read_fflags(CPURISCVState *env, int csrno,
490 target_ulong *val)
491 {
492 *val = riscv_cpu_get_fflags(env);
493 return RISCV_EXCP_NONE;
494 }
495
496 static RISCVException write_fflags(CPURISCVState *env, int csrno,
497 target_ulong val)
498 {
499 #if !defined(CONFIG_USER_ONLY)
500 if (riscv_has_ext(env, RVF)) {
501 env->mstatus |= MSTATUS_FS;
502 }
503 #endif
504 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
505 return RISCV_EXCP_NONE;
506 }
507
508 static RISCVException read_frm(CPURISCVState *env, int csrno,
509 target_ulong *val)
510 {
511 *val = env->frm;
512 return RISCV_EXCP_NONE;
513 }
514
515 static RISCVException write_frm(CPURISCVState *env, int csrno,
516 target_ulong val)
517 {
518 #if !defined(CONFIG_USER_ONLY)
519 if (riscv_has_ext(env, RVF)) {
520 env->mstatus |= MSTATUS_FS;
521 }
522 #endif
523 env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
524 return RISCV_EXCP_NONE;
525 }
526
527 static RISCVException read_fcsr(CPURISCVState *env, int csrno,
528 target_ulong *val)
529 {
530 *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
531 | (env->frm << FSR_RD_SHIFT);
532 return RISCV_EXCP_NONE;
533 }
534
535 static RISCVException write_fcsr(CPURISCVState *env, int csrno,
536 target_ulong val)
537 {
538 #if !defined(CONFIG_USER_ONLY)
539 if (riscv_has_ext(env, RVF)) {
540 env->mstatus |= MSTATUS_FS;
541 }
542 #endif
543 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
544 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
545 return RISCV_EXCP_NONE;
546 }
547
548 static RISCVException read_vtype(CPURISCVState *env, int csrno,
549 target_ulong *val)
550 {
551 uint64_t vill;
552 switch (env->xl) {
553 case MXL_RV32:
554 vill = (uint32_t)env->vill << 31;
555 break;
556 case MXL_RV64:
557 vill = (uint64_t)env->vill << 63;
558 break;
559 default:
560 g_assert_not_reached();
561 }
562 *val = (target_ulong)vill | env->vtype;
563 return RISCV_EXCP_NONE;
564 }
565
566 static RISCVException read_vl(CPURISCVState *env, int csrno,
567 target_ulong *val)
568 {
569 *val = env->vl;
570 return RISCV_EXCP_NONE;
571 }
572
573 static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
574 {
575 *val = env_archcpu(env)->cfg.vlen >> 3;
576 return RISCV_EXCP_NONE;
577 }
578
579 static RISCVException read_vxrm(CPURISCVState *env, int csrno,
580 target_ulong *val)
581 {
582 *val = env->vxrm;
583 return RISCV_EXCP_NONE;
584 }
585
586 static RISCVException write_vxrm(CPURISCVState *env, int csrno,
587 target_ulong val)
588 {
589 #if !defined(CONFIG_USER_ONLY)
590 env->mstatus |= MSTATUS_VS;
591 #endif
592 env->vxrm = val;
593 return RISCV_EXCP_NONE;
594 }
595
596 static RISCVException read_vxsat(CPURISCVState *env, int csrno,
597 target_ulong *val)
598 {
599 *val = env->vxsat;
600 return RISCV_EXCP_NONE;
601 }
602
603 static RISCVException write_vxsat(CPURISCVState *env, int csrno,
604 target_ulong val)
605 {
606 #if !defined(CONFIG_USER_ONLY)
607 env->mstatus |= MSTATUS_VS;
608 #endif
609 env->vxsat = val;
610 return RISCV_EXCP_NONE;
611 }
612
613 static RISCVException read_vstart(CPURISCVState *env, int csrno,
614 target_ulong *val)
615 {
616 *val = env->vstart;
617 return RISCV_EXCP_NONE;
618 }
619
620 static RISCVException write_vstart(CPURISCVState *env, int csrno,
621 target_ulong val)
622 {
623 #if !defined(CONFIG_USER_ONLY)
624 env->mstatus |= MSTATUS_VS;
625 #endif
626 /*
627 * The vstart CSR is defined to have only enough writable bits
628 * to hold the largest element index, i.e. lg2(VLEN) bits.
629 */
630 env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
631 return RISCV_EXCP_NONE;
632 }
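/*
 * Worked example of the masking above, assuming cfg.vlen = 128:
 * ctzl(128) = 7, so ~(~0ULL << 7) = 0x7f and vstart keeps bits [6:0],
 * i.e. values 0..127. A write of 0x1ff would be stored as 0x7f.
 */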
633
634 static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
635 {
636 *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
637 return RISCV_EXCP_NONE;
638 }
639
640 static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
641 {
642 #if !defined(CONFIG_USER_ONLY)
643 env->mstatus |= MSTATUS_VS;
644 #endif
645 env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
646 env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
647 return RISCV_EXCP_NONE;
648 }
649
650 /* User Timers and Counters */
651 static target_ulong get_ticks(bool shift)
652 {
653 int64_t val;
654 target_ulong result;
655
656 #if !defined(CONFIG_USER_ONLY)
657 if (icount_enabled()) {
658 val = icount_get();
659 } else {
660 val = cpu_get_host_ticks();
661 }
662 #else
663 val = cpu_get_host_ticks();
664 #endif
665
666 if (shift) {
667 result = val >> 32;
668 } else {
669 result = val;
670 }
671
672 return result;
673 }
674
675 #if defined(CONFIG_USER_ONLY)
676 static RISCVException read_time(CPURISCVState *env, int csrno,
677 target_ulong *val)
678 {
679 *val = cpu_get_host_ticks();
680 return RISCV_EXCP_NONE;
681 }
682
683 static RISCVException read_timeh(CPURISCVState *env, int csrno,
684 target_ulong *val)
685 {
686 *val = cpu_get_host_ticks() >> 32;
687 return RISCV_EXCP_NONE;
688 }
689
690 static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
691 {
692 *val = get_ticks(false);
693 return RISCV_EXCP_NONE;
694 }
695
696 static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
697 {
698 *val = get_ticks(true);
699 return RISCV_EXCP_NONE;
700 }
701
702 #else /* CONFIG_USER_ONLY */
703
704 static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val)
705 {
706 int evt_index = csrno - CSR_MCOUNTINHIBIT;
707
708 *val = env->mhpmevent_val[evt_index];
709
710 return RISCV_EXCP_NONE;
711 }
712
713 static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val)
714 {
715 int evt_index = csrno - CSR_MCOUNTINHIBIT;
716 uint64_t mhpmevt_val = val;
717
718 env->mhpmevent_val[evt_index] = val;
719
720 if (riscv_cpu_mxl(env) == MXL_RV32) {
721 mhpmevt_val = mhpmevt_val |
722 ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
723 }
724 riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
725
726 return RISCV_EXCP_NONE;
727 }
728
729 static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val)
730 {
731 int evt_index = csrno - CSR_MHPMEVENT3H + 3;
732
733 *val = env->mhpmeventh_val[evt_index];
734
735 return RISCV_EXCP_NONE;
736 }
737
738 static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val)
739 {
740 int evt_index = csrno - CSR_MHPMEVENT3H + 3;
741 uint64_t mhpmevth_val = val;
742 uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
743
744 mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
745 env->mhpmeventh_val[evt_index] = val;
746
747 riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
748
749 return RISCV_EXCP_NONE;
750 }
751
752 static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
753 {
754 int ctr_idx = csrno - CSR_MCYCLE;
755 PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
756 uint64_t mhpmctr_val = val;
757
758 counter->mhpmcounter_val = val;
759 if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
760 riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
761 counter->mhpmcounter_prev = get_ticks(false);
762 if (ctr_idx > 2) {
763 if (riscv_cpu_mxl(env) == MXL_RV32) {
764 mhpmctr_val = mhpmctr_val |
765 ((uint64_t)counter->mhpmcounterh_val << 32);
766 }
767 riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
768 }
769 } else {
770 /* Other counters can keep incrementing from the given value */
771 counter->mhpmcounter_prev = val;
772 }
773
774 return RISCV_EXCP_NONE;
775 }
776
777 static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
778 {
779 int ctr_idx = csrno - CSR_MCYCLEH;
780 PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
781 uint64_t mhpmctr_val = counter->mhpmcounter_val;
782 uint64_t mhpmctrh_val = val;
783
784 counter->mhpmcounterh_val = val;
785 mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
786 if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
787 riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
788 counter->mhpmcounterh_prev = get_ticks(true);
789 if (ctr_idx > 2) {
790 riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
791 }
792 } else {
793 counter->mhpmcounterh_prev = val;
794 }
795
796 return RISCV_EXCP_NONE;
797 }
798
799 static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
800 bool upper_half, uint32_t ctr_idx)
801 {
802 PMUCTRState counter = env->pmu_ctrs[ctr_idx];
803 target_ulong ctr_prev = upper_half ? counter.mhpmcounterh_prev :
804 counter.mhpmcounter_prev;
805 target_ulong ctr_val = upper_half ? counter.mhpmcounterh_val :
806 counter.mhpmcounter_val;
807
808 if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
809 /**
810 * The counter should not increment if the inhibit bit is set. We can't
811 * really stop the icount counting. Just return the counter value written
812 * by the supervisor to indicate that the counter was not incremented.
813 */
814 if (!counter.started) {
815 *val = ctr_val;
816 return RISCV_EXCP_NONE;
817 } else {
818 /* Mark that the counter has been stopped */
819 counter.started = false;
820 }
821 }
822
823 /**
824 * The kernel computes the perf delta by subtracting the current value from
825 * the value it initialized previously (ctr_val).
826 */
827 if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
828 riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
829 *val = get_ticks(upper_half) - ctr_prev + ctr_val;
830 } else {
831 *val = ctr_val;
832 }
833
834 return RISCV_EXCP_NONE;
835 }
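/*
 * Numerical example of the delta computation above (hypothetical values):
 * the guest programs the counter with ctr_val = 100 while the tick source
 * reads ctr_prev = 5000; if a later read sees get_ticks() = 5040, the
 * returned value is 5040 - 5000 + 100 = 140, i.e. the guest-visible
 * counter advanced by exactly the 40 ticks that elapsed.
 */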
836
837 static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
838 {
839 uint16_t ctr_index;
840
841 if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
842 ctr_index = csrno - CSR_MCYCLE;
843 } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
844 ctr_index = csrno - CSR_CYCLE;
845 } else {
846 return RISCV_EXCP_ILLEGAL_INST;
847 }
848
849 return riscv_pmu_read_ctr(env, val, false, ctr_index);
850 }
851
852 static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
853 {
854 uint16_t ctr_index;
855
856 if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
857 ctr_index = csrno - CSR_MCYCLEH;
858 } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
859 ctr_index = csrno - CSR_CYCLEH;
860 } else {
861 return RISCV_EXCP_ILLEGAL_INST;
862 }
863
864 return riscv_pmu_read_ctr(env, val, true, ctr_index);
865 }
866
867 static int read_scountovf(CPURISCVState *env, int csrno, target_ulong *val)
868 {
869 int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
870 int i;
871 *val = 0;
872 target_ulong *mhpm_evt_val;
873 uint64_t of_bit_mask;
874
875 if (riscv_cpu_mxl(env) == MXL_RV32) {
876 mhpm_evt_val = env->mhpmeventh_val;
877 of_bit_mask = MHPMEVENTH_BIT_OF;
878 } else {
879 mhpm_evt_val = env->mhpmevent_val;
880 of_bit_mask = MHPMEVENT_BIT_OF;
881 }
882
883 for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
884 if ((get_field(env->mcounteren, BIT(i))) &&
885 (mhpm_evt_val[i] & of_bit_mask)) {
886 *val |= BIT(i);
887 }
888 }
889
890 return RISCV_EXCP_NONE;
891 }
892
893 static RISCVException read_time(CPURISCVState *env, int csrno,
894 target_ulong *val)
895 {
896 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
897
898 if (!env->rdtime_fn) {
899 return RISCV_EXCP_ILLEGAL_INST;
900 }
901
902 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
903 return RISCV_EXCP_NONE;
904 }
905
906 static RISCVException read_timeh(CPURISCVState *env, int csrno,
907 target_ulong *val)
908 {
909 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
910
911 if (!env->rdtime_fn) {
912 return RISCV_EXCP_ILLEGAL_INST;
913 }
914
915 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
916 return RISCV_EXCP_NONE;
917 }
918
919 static RISCVException sstc(CPURISCVState *env, int csrno)
920 {
921 CPUState *cs = env_cpu(env);
922 RISCVCPU *cpu = RISCV_CPU(cs);
923 bool hmode_check = false;
924
925 if (!cpu->cfg.ext_sstc || !env->rdtime_fn) {
926 return RISCV_EXCP_ILLEGAL_INST;
927 }
928
929 if (env->priv == PRV_M) {
930 return RISCV_EXCP_NONE;
931 }
932
933 /*
934 * No need for a separate function for RV32, as env->menvcfg stores both
935 * menvcfg and menvcfgh for RV32.
936 */
937 if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
938 get_field(env->menvcfg, MENVCFG_STCE))) {
939 return RISCV_EXCP_ILLEGAL_INST;
940 }
941
942 if (riscv_cpu_virt_enabled(env)) {
943 if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
944 get_field(env->henvcfg, HENVCFG_STCE))) {
945 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
946 }
947 }
948
949 if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
950 hmode_check = true;
951 }
952
953 return hmode_check ? hmode(env, csrno) : smode(env, csrno);
954 }
955
956 static RISCVException sstc_32(CPURISCVState *env, int csrno)
957 {
958 if (riscv_cpu_mxl(env) != MXL_RV32) {
959 return RISCV_EXCP_ILLEGAL_INST;
960 }
961
962 return sstc(env, csrno);
963 }
964
965 static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
966 target_ulong *val)
967 {
968 *val = env->vstimecmp;
969
970 return RISCV_EXCP_NONE;
971 }
972
973 static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
974 target_ulong *val)
975 {
976 *val = env->vstimecmp >> 32;
977
978 return RISCV_EXCP_NONE;
979 }
980
981 static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
982 target_ulong val)
983 {
984 RISCVCPU *cpu = env_archcpu(env);
985
986 if (riscv_cpu_mxl(env) == MXL_RV32) {
987 env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
988 } else {
989 env->vstimecmp = val;
990 }
991
992 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
993 env->htimedelta, MIP_VSTIP);
994
995 return RISCV_EXCP_NONE;
996 }
997
998 static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
999 target_ulong val)
1000 {
1001 RISCVCPU *cpu = env_archcpu(env);
1002
1003 env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
1004 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
1005 env->htimedelta, MIP_VSTIP);
1006
1007 return RISCV_EXCP_NONE;
1008 }
1009
1010 static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
1011 target_ulong *val)
1012 {
1013 if (riscv_cpu_virt_enabled(env)) {
1014 *val = env->vstimecmp;
1015 } else {
1016 *val = env->stimecmp;
1017 }
1018
1019 return RISCV_EXCP_NONE;
1020 }
1021
1022 static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
1023 target_ulong *val)
1024 {
1025 if (riscv_cpu_virt_enabled(env)) {
1026 *val = env->vstimecmp >> 32;
1027 } else {
1028 *val = env->stimecmp >> 32;
1029 }
1030
1031 return RISCV_EXCP_NONE;
1032 }
1033
1034 static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
1035 target_ulong val)
1036 {
1037 RISCVCPU *cpu = env_archcpu(env);
1038
1039 if (riscv_cpu_virt_enabled(env)) {
1040 if (env->hvictl & HVICTL_VTI) {
1041 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1042 }
1043 return write_vstimecmp(env, csrno, val);
1044 }
1045
1046 if (riscv_cpu_mxl(env) == MXL_RV32) {
1047 env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
1048 } else {
1049 env->stimecmp = val;
1050 }
1051
1052 riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP);
1053
1054 return RISCV_EXCP_NONE;
1055 }
1056
1057 static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
1058 target_ulong val)
1059 {
1060 RISCVCPU *cpu = env_archcpu(env);
1061
1062 if (riscv_cpu_virt_enabled(env)) {
1063 if (env->hvictl & HVICTL_VTI) {
1064 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1065 }
1066 return write_vstimecmph(env, csrno, val);
1067 }
1068
1069 env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
1070 riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP);
1071
1072 return RISCV_EXCP_NONE;
1073 }
1074
1075 /* Machine constants */
1076
1077 #define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
1078 #define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP | \
1079 MIP_LCOFIP))
1080 #define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
1081 #define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
1082
1083 #define VSTOPI_NUM_SRCS 5
1084
1085 static const uint64_t delegable_ints = S_MODE_INTERRUPTS |
1086 VS_MODE_INTERRUPTS;
1087 static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS;
1088 static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
1089 HS_MODE_INTERRUPTS;
1090 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
1091 (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
1092 (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
1093 (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
1094 (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
1095 (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
1096 (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
1097 (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
1098 (1ULL << (RISCV_EXCP_U_ECALL)) | \
1099 (1ULL << (RISCV_EXCP_S_ECALL)) | \
1100 (1ULL << (RISCV_EXCP_VS_ECALL)) | \
1101 (1ULL << (RISCV_EXCP_M_ECALL)) | \
1102 (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
1103 (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
1104 (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
1105 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
1106 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
1107 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
1108 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
1109 static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
1110 ~((1ULL << (RISCV_EXCP_S_ECALL)) |
1111 (1ULL << (RISCV_EXCP_VS_ECALL)) |
1112 (1ULL << (RISCV_EXCP_M_ECALL)) |
1113 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
1114 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
1115 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
1116 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
1117 static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
1118 SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
1119 SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
1120 static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP |
1121 SIP_LCOFIP;
1122 static const target_ulong hip_writable_mask = MIP_VSSIP;
1123 static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
1124 static const target_ulong vsip_writable_mask = MIP_VSSIP;
1125
1126 static const char valid_vm_1_10_32[16] = {
1127 [VM_1_10_MBARE] = 1,
1128 [VM_1_10_SV32] = 1
1129 };
1130
1131 static const char valid_vm_1_10_64[16] = {
1132 [VM_1_10_MBARE] = 1,
1133 [VM_1_10_SV39] = 1,
1134 [VM_1_10_SV48] = 1,
1135 [VM_1_10_SV57] = 1
1136 };
1137
1138 /* Machine Information Registers */
1139 static RISCVException read_zero(CPURISCVState *env, int csrno,
1140 target_ulong *val)
1141 {
1142 *val = 0;
1143 return RISCV_EXCP_NONE;
1144 }
1145
1146 static RISCVException write_ignore(CPURISCVState *env, int csrno,
1147 target_ulong val)
1148 {
1149 return RISCV_EXCP_NONE;
1150 }
1151
1152 static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
1153 target_ulong *val)
1154 {
1155 CPUState *cs = env_cpu(env);
1156 RISCVCPU *cpu = RISCV_CPU(cs);
1157
1158 *val = cpu->cfg.mvendorid;
1159 return RISCV_EXCP_NONE;
1160 }
1161
1162 static RISCVException read_marchid(CPURISCVState *env, int csrno,
1163 target_ulong *val)
1164 {
1165 CPUState *cs = env_cpu(env);
1166 RISCVCPU *cpu = RISCV_CPU(cs);
1167
1168 *val = cpu->cfg.marchid;
1169 return RISCV_EXCP_NONE;
1170 }
1171
1172 static RISCVException read_mimpid(CPURISCVState *env, int csrno,
1173 target_ulong *val)
1174 {
1175 CPUState *cs = env_cpu(env);
1176 RISCVCPU *cpu = RISCV_CPU(cs);
1177
1178 *val = cpu->cfg.mimpid;
1179 return RISCV_EXCP_NONE;
1180 }
1181
1182 static RISCVException read_mhartid(CPURISCVState *env, int csrno,
1183 target_ulong *val)
1184 {
1185 *val = env->mhartid;
1186 return RISCV_EXCP_NONE;
1187 }
1188
1189 /* Machine Trap Setup */
1190
1191 /* We do not store SD explicitly, only compute it on demand. */
1192 static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
1193 {
1194 if ((status & MSTATUS_FS) == MSTATUS_FS ||
1195 (status & MSTATUS_VS) == MSTATUS_VS ||
1196 (status & MSTATUS_XS) == MSTATUS_XS) {
1197 switch (xl) {
1198 case MXL_RV32:
1199 return status | MSTATUS32_SD;
1200 case MXL_RV64:
1201 return status | MSTATUS64_SD;
1202 case MXL_RV128:
1203 return MSTATUSH128_SD;
1204 default:
1205 g_assert_not_reached();
1206 }
1207 }
1208 return status;
1209 }
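/*
 * Example: if mstatus.FS is Dirty (both FS bits set), read_mstatus()
 * below reports MSTATUS64_SD (RV64) or MSTATUS32_SD (RV32) as set even
 * though SD is never stored in env->mstatus.
 */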
1210
1211 static RISCVException read_mstatus(CPURISCVState *env, int csrno,
1212 target_ulong *val)
1213 {
1214 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
1215 return RISCV_EXCP_NONE;
1216 }
1217
1218 static int validate_vm(CPURISCVState *env, target_ulong vm)
1219 {
1220 if (riscv_cpu_mxl(env) == MXL_RV32) {
1221 return valid_vm_1_10_32[vm & 0xf];
1222 } else {
1223 return valid_vm_1_10_64[vm & 0xf];
1224 }
1225 }
1226
1227 static RISCVException write_mstatus(CPURISCVState *env, int csrno,
1228 target_ulong val)
1229 {
1230 uint64_t mstatus = env->mstatus;
1231 uint64_t mask = 0;
1232 RISCVMXL xl = riscv_cpu_mxl(env);
1233
1234 /* flush tlb on mstatus fields that affect VM */
1235 if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV |
1236 MSTATUS_MPRV | MSTATUS_SUM)) {
1237 tlb_flush(env_cpu(env));
1238 }
1239 mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
1240 MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
1241 MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
1242 MSTATUS_TW | MSTATUS_VS;
1243
1244 if (riscv_has_ext(env, RVF)) {
1245 mask |= MSTATUS_FS;
1246 }
1247
1248 if (xl != MXL_RV32 || env->debugger) {
1249 /*
1250 * RV32: MPV and GVA are not in mstatus. The current plan is to
1251 * add them to mstatush. For now, we just don't support it.
1252 */
1253 mask |= MSTATUS_MPV | MSTATUS_GVA;
1254 if ((val & MSTATUS64_UXL) != 0) {
1255 mask |= MSTATUS64_UXL;
1256 }
1257 }
1258
1259 mstatus = (mstatus & ~mask) | (val & mask);
1260
1261 if (xl > MXL_RV32) {
1262 /* SXL field is for now read only */
1263 mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
1264 }
1265 env->mstatus = mstatus;
1266 env->xl = cpu_recompute_xl(env);
1267
1268 return RISCV_EXCP_NONE;
1269 }
1270
1271 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
1272 target_ulong *val)
1273 {
1274 *val = env->mstatus >> 32;
1275 return RISCV_EXCP_NONE;
1276 }
1277
1278 static RISCVException write_mstatush(CPURISCVState *env, int csrno,
1279 target_ulong val)
1280 {
1281 uint64_t valh = (uint64_t)val << 32;
1282 uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;
1283
1284 if ((valh ^ env->mstatus) & (MSTATUS_MPV)) {
1285 tlb_flush(env_cpu(env));
1286 }
1287
1288 env->mstatus = (env->mstatus & ~mask) | (valh & mask);
1289
1290 return RISCV_EXCP_NONE;
1291 }
1292
1293 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
1294 Int128 *val)
1295 {
1296 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus));
1297 return RISCV_EXCP_NONE;
1298 }
1299
1300 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
1301 Int128 *val)
1302 {
1303 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
1304 return RISCV_EXCP_NONE;
1305 }
1306
1307 static RISCVException read_misa(CPURISCVState *env, int csrno,
1308 target_ulong *val)
1309 {
1310 target_ulong misa;
1311
1312 switch (env->misa_mxl) {
1313 case MXL_RV32:
1314 misa = (target_ulong)MXL_RV32 << 30;
1315 break;
1316 #ifdef TARGET_RISCV64
1317 case MXL_RV64:
1318 misa = (target_ulong)MXL_RV64 << 62;
1319 break;
1320 #endif
1321 default:
1322 g_assert_not_reached();
1323 }
1324
1325 *val = misa | env->misa_ext;
1326 return RISCV_EXCP_NONE;
1327 }
1328
1329 static RISCVException write_misa(CPURISCVState *env, int csrno,
1330 target_ulong val)
1331 {
1332 if (!riscv_feature(env, RISCV_FEATURE_MISA)) {
1333 /* drop write to misa */
1334 return RISCV_EXCP_NONE;
1335 }
1336
1337 /* 'I' or 'E' must be present */
1338 if (!(val & (RVI | RVE))) {
1339 /* It is not, drop write to misa */
1340 return RISCV_EXCP_NONE;
1341 }
1342
1343 /* 'E' excludes all other extensions */
1344 if (val & RVE) {
1345 /* When we support 'E' we can do "val = RVE;", but for now
1346 * we just drop writes if 'E' is present.
1347 */
1348 return RISCV_EXCP_NONE;
1349 }
1350
1351 /*
1352 * misa.MXL writes are not supported by QEMU.
1353 * Drop writes to those bits.
1354 */
1355
1356 /* Mask extensions that are not supported by this hart */
1357 val &= env->misa_ext_mask;
1358
1359 /* Mask extensions that are not supported by QEMU */
1360 val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);
1361
1362 /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
1363 if ((val & RVD) && !(val & RVF)) {
1364 val &= ~RVD;
1365 }
1366
1367 /* Suppress 'C' if next instruction is not aligned
1368 * TODO: this should check next_pc
1369 */
1370 if ((val & RVC) && (GETPC() & ~3) != 0) {
1371 val &= ~RVC;
1372 }
1373
1374 /* If nothing changed, do nothing. */
1375 if (val == env->misa_ext) {
1376 return RISCV_EXCP_NONE;
1377 }
1378
1379 if (!(val & RVF)) {
1380 env->mstatus &= ~MSTATUS_FS;
1381 }
1382
1383 /* flush translation cache */
1384 tb_flush(env_cpu(env));
1385 env->misa_ext = val;
1386 env->xl = riscv_cpu_mxl(env);
1387 return RISCV_EXCP_NONE;
1388 }
1389
1390 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
1391 target_ulong *val)
1392 {
1393 *val = env->medeleg;
1394 return RISCV_EXCP_NONE;
1395 }
1396
1397 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
1398 target_ulong val)
1399 {
1400 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
1401 return RISCV_EXCP_NONE;
1402 }
1403
1404 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
1405 uint64_t *ret_val,
1406 uint64_t new_val, uint64_t wr_mask)
1407 {
1408 uint64_t mask = wr_mask & delegable_ints;
1409
1410 if (ret_val) {
1411 *ret_val = env->mideleg;
1412 }
1413
1414 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
1415
1416 if (riscv_has_ext(env, RVH)) {
1417 env->mideleg |= HS_MODE_INTERRUPTS;
1418 }
1419
1420 return RISCV_EXCP_NONE;
1421 }
1422
1423 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
1424 target_ulong *ret_val,
1425 target_ulong new_val, target_ulong wr_mask)
1426 {
1427 uint64_t rval;
1428 RISCVException ret;
1429
1430 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
1431 if (ret_val) {
1432 *ret_val = rval;
1433 }
1434
1435 return ret;
1436 }
1437
1438 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
1439 target_ulong *ret_val,
1440 target_ulong new_val,
1441 target_ulong wr_mask)
1442 {
1443 uint64_t rval;
1444 RISCVException ret;
1445
1446 ret = rmw_mideleg64(env, csrno, &rval,
1447 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1448 if (ret_val) {
1449 *ret_val = rval >> 32;
1450 }
1451
1452 return ret;
1453 }
1454
1455 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
1456 uint64_t *ret_val,
1457 uint64_t new_val, uint64_t wr_mask)
1458 {
1459 uint64_t mask = wr_mask & all_ints;
1460
1461 if (ret_val) {
1462 *ret_val = env->mie;
1463 }
1464
1465 env->mie = (env->mie & ~mask) | (new_val & mask);
1466
1467 if (!riscv_has_ext(env, RVH)) {
1468 env->mie &= ~((uint64_t)MIP_SGEIP);
1469 }
1470
1471 return RISCV_EXCP_NONE;
1472 }
1473
1474 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
1475 target_ulong *ret_val,
1476 target_ulong new_val, target_ulong wr_mask)
1477 {
1478 uint64_t rval;
1479 RISCVException ret;
1480
1481 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
1482 if (ret_val) {
1483 *ret_val = rval;
1484 }
1485
1486 return ret;
1487 }
1488
1489 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
1490 target_ulong *ret_val,
1491 target_ulong new_val, target_ulong wr_mask)
1492 {
1493 uint64_t rval;
1494 RISCVException ret;
1495
1496 ret = rmw_mie64(env, csrno, &rval,
1497 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1498 if (ret_val) {
1499 *ret_val = rval >> 32;
1500 }
1501
1502 return ret;
1503 }
1504
1505 static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
1506 {
1507 int irq;
1508 uint8_t iprio;
1509
1510 irq = riscv_cpu_mirq_pending(env);
1511 if (irq <= 0 || irq > 63) {
1512 *val = 0;
1513 } else {
1514 iprio = env->miprio[irq];
1515 if (!iprio) {
1516 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
1517 iprio = IPRIO_MMAXIPRIO;
1518 }
1519 }
1520 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
1521 *val |= iprio;
1522 }
1523
1524 return RISCV_EXCP_NONE;
1525 }
1526
1527 static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
1528 {
1529 if (!riscv_cpu_virt_enabled(env)) {
1530 return csrno;
1531 }
1532
1533 switch (csrno) {
1534 case CSR_SISELECT:
1535 return CSR_VSISELECT;
1536 case CSR_SIREG:
1537 return CSR_VSIREG;
1538 case CSR_STOPEI:
1539 return CSR_VSTOPEI;
1540 default:
1541 return csrno;
1542 };
1543 }
1544
1545 static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val,
1546 target_ulong new_val, target_ulong wr_mask)
1547 {
1548 target_ulong *iselect;
1549
1550 /* Translate CSR number for VS-mode */
1551 csrno = aia_xlate_vs_csrno(env, csrno);
1552
1553 /* Find the iselect CSR based on CSR number */
1554 switch (csrno) {
1555 case CSR_MISELECT:
1556 iselect = &env->miselect;
1557 break;
1558 case CSR_SISELECT:
1559 iselect = &env->siselect;
1560 break;
1561 case CSR_VSISELECT:
1562 iselect = &env->vsiselect;
1563 break;
1564 default:
1565 return RISCV_EXCP_ILLEGAL_INST;
1566 };
1567
1568 if (val) {
1569 *val = *iselect;
1570 }
1571
1572 wr_mask &= ISELECT_MASK;
1573 if (wr_mask) {
1574 *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
1575 }
1576
1577 return RISCV_EXCP_NONE;
1578 }
1579
1580 static int rmw_iprio(target_ulong xlen,
1581 target_ulong iselect, uint8_t *iprio,
1582 target_ulong *val, target_ulong new_val,
1583 target_ulong wr_mask, int ext_irq_no)
1584 {
1585 int i, firq, nirqs;
1586 target_ulong old_val;
1587
1588 if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
1589 return -EINVAL;
1590 }
1591 if (xlen != 32 && iselect & 0x1) {
1592 return -EINVAL;
1593 }
1594
1595 nirqs = 4 * (xlen / 32);
1596 firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);
1597
1598 old_val = 0;
1599 for (i = 0; i < nirqs; i++) {
1600 old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
1601 }
1602
1603 if (val) {
1604 *val = old_val;
1605 }
1606
1607 if (wr_mask) {
1608 new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
1609 for (i = 0; i < nirqs; i++) {
1610 /*
1611 * M-level and S-level external IRQ priority is always read-only
1612 * zero. This means the default priority order is always preferred
1613 * for M-level and S-level external IRQs.
1614 */
1615 if ((firq + i) == ext_irq_no) {
1616 continue;
1617 }
1618 iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
1619 }
1620 }
1621
1622 return 0;
1623 }
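/*
 * Example of the indexing above: on RV64 (xlen = 64), nirqs = 8 and only
 * even iselect values are legal, so iselect = ISELECT_IPRIO0 + 2 gives
 * firq = (2 / 2) * 8 = 8 and the access covers IRQ priorities 8..15.
 * On RV32, nirqs = 4 and the same iselect gives firq = 2 * 4 = 8,
 * covering priorities 8..11.
 */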
1624
1625 static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
1626 target_ulong new_val, target_ulong wr_mask)
1627 {
1628 bool virt;
1629 uint8_t *iprio;
1630 int ret = -EINVAL;
1631 target_ulong priv, isel, vgein;
1632
1633 /* Translate CSR number for VS-mode */
1634 csrno = aia_xlate_vs_csrno(env, csrno);
1635
1636 /* Decode register details from CSR number */
1637 virt = false;
1638 switch (csrno) {
1639 case CSR_MIREG:
1640 iprio = env->miprio;
1641 isel = env->miselect;
1642 priv = PRV_M;
1643 break;
1644 case CSR_SIREG:
1645 iprio = env->siprio;
1646 isel = env->siselect;
1647 priv = PRV_S;
1648 break;
1649 case CSR_VSIREG:
1650 iprio = env->hviprio;
1651 isel = env->vsiselect;
1652 priv = PRV_S;
1653 virt = true;
1654 break;
1655 default:
1656 goto done;
1657 };
1658
1659 /* Find the selected guest interrupt file */
1660 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1661
1662 if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
1663 /* Local interrupt priority registers not available for VS-mode */
1664 if (!virt) {
1665 ret = rmw_iprio(riscv_cpu_mxl_bits(env),
1666 isel, iprio, val, new_val, wr_mask,
1667 (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
1668 }
1669 } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
1670 /* IMSIC registers are only available when the machine implements an IMSIC. */
1671 if (env->aia_ireg_rmw_fn[priv]) {
1672 /* Selected guest interrupt file should not be zero */
1673 if (virt && (!vgein || env->geilen < vgein)) {
1674 goto done;
1675 }
1676 /* Call machine specific IMSIC register emulation */
1677 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1678 AIA_MAKE_IREG(isel, priv, virt, vgein,
1679 riscv_cpu_mxl_bits(env)),
1680 val, new_val, wr_mask);
1681 }
1682 }
1683
1684 done:
1685 if (ret) {
1686 return (riscv_cpu_virt_enabled(env) && virt) ?
1687 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1688 }
1689 return RISCV_EXCP_NONE;
1690 }
1691
1692 static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
1693 target_ulong new_val, target_ulong wr_mask)
1694 {
1695 bool virt;
1696 int ret = -EINVAL;
1697 target_ulong priv, vgein;
1698
1699 /* Translate CSR number for VS-mode */
1700 csrno = aia_xlate_vs_csrno(env, csrno);
1701
1702 /* Decode register details from CSR number */
1703 virt = false;
1704 switch (csrno) {
1705 case CSR_MTOPEI:
1706 priv = PRV_M;
1707 break;
1708 case CSR_STOPEI:
1709 priv = PRV_S;
1710 break;
1711 case CSR_VSTOPEI:
1712 priv = PRV_S;
1713 virt = true;
1714 break;
1715 default:
1716 goto done;
1717 };
1718
1719 /* IMSIC CSRs are only available when the machine implements an IMSIC. */
1720 if (!env->aia_ireg_rmw_fn[priv]) {
1721 goto done;
1722 }
1723
1724 /* Find the selected guest interrupt file */
1725 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1726
1727 /* Selected guest interrupt file should be valid */
1728 if (virt && (!vgein || env->geilen < vgein)) {
1729 goto done;
1730 }
1731
1732 /* Call machine specific IMSIC register emulation for TOPEI */
1733 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1734 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
1735 riscv_cpu_mxl_bits(env)),
1736 val, new_val, wr_mask);
1737
1738 done:
1739 if (ret) {
1740 return (riscv_cpu_virt_enabled(env) && virt) ?
1741 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1742 }
1743 return RISCV_EXCP_NONE;
1744 }
1745
1746 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
1747 target_ulong *val)
1748 {
1749 *val = env->mtvec;
1750 return RISCV_EXCP_NONE;
1751 }
1752
1753 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
1754 target_ulong val)
1755 {
1756 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
1757 if ((val & 3) < 2) {
1758 env->mtvec = val;
1759 } else {
1760 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
1761 }
1762 return RISCV_EXCP_NONE;
1763 }
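/*
 * Example of the check above: writing 0x80000001 selects vectored mode
 * with base 0x80000000, while a write whose mode bits are >= 2 (e.g.
 * 0x80000002) is dropped and mtvec keeps its previous value.
 */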
1764
1765 static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
1766 target_ulong *val)
1767 {
1768 *val = env->mcountinhibit;
1769 return RISCV_EXCP_NONE;
1770 }
1771
1772 static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
1773 target_ulong val)
1774 {
1775 int cidx;
1776 PMUCTRState *counter;
1777
1778 env->mcountinhibit = val;
1779
1780 /* Mark any counter that is no longer inhibited as started */
1781 for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
1782 if (!get_field(env->mcountinhibit, BIT(cidx))) {
1783 counter = &env->pmu_ctrs[cidx];
1784 counter->started = true;
1785 }
1786 }
1787
1788 return RISCV_EXCP_NONE;
1789 }
1790
1791 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
1792 target_ulong *val)
1793 {
1794 *val = env->mcounteren;
1795 return RISCV_EXCP_NONE;
1796 }
1797
1798 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
1799 target_ulong val)
1800 {
1801 env->mcounteren = val;
1802 return RISCV_EXCP_NONE;
1803 }
1804
1805 /* Machine Trap Handling */
1806 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
1807 Int128 *val)
1808 {
1809 *val = int128_make128(env->mscratch, env->mscratchh);
1810 return RISCV_EXCP_NONE;
1811 }
1812
1813 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
1814 Int128 val)
1815 {
1816 env->mscratch = int128_getlo(val);
1817 env->mscratchh = int128_gethi(val);
1818 return RISCV_EXCP_NONE;
1819 }
1820
1821 static RISCVException read_mscratch(CPURISCVState *env, int csrno,
1822 target_ulong *val)
1823 {
1824 *val = env->mscratch;
1825 return RISCV_EXCP_NONE;
1826 }
1827
1828 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
1829 target_ulong val)
1830 {
1831 env->mscratch = val;
1832 return RISCV_EXCP_NONE;
1833 }
1834
1835 static RISCVException read_mepc(CPURISCVState *env, int csrno,
1836 target_ulong *val)
1837 {
1838 *val = env->mepc;
1839 return RISCV_EXCP_NONE;
1840 }
1841
1842 static RISCVException write_mepc(CPURISCVState *env, int csrno,
1843 target_ulong val)
1844 {
1845 env->mepc = val;
1846 return RISCV_EXCP_NONE;
1847 }
1848
1849 static RISCVException read_mcause(CPURISCVState *env, int csrno,
1850 target_ulong *val)
1851 {
1852 *val = env->mcause;
1853 return RISCV_EXCP_NONE;
1854 }
1855
1856 static RISCVException write_mcause(CPURISCVState *env, int csrno,
1857 target_ulong val)
1858 {
1859 env->mcause = val;
1860 return RISCV_EXCP_NONE;
1861 }
1862
1863 static RISCVException read_mtval(CPURISCVState *env, int csrno,
1864 target_ulong *val)
1865 {
1866 *val = env->mtval;
1867 return RISCV_EXCP_NONE;
1868 }
1869
1870 static RISCVException write_mtval(CPURISCVState *env, int csrno,
1871 target_ulong val)
1872 {
1873 env->mtval = val;
1874 return RISCV_EXCP_NONE;
1875 }
1876
1877 /* Execution environment configuration setup */
1878 static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
1879 target_ulong *val)
1880 {
1881 *val = env->menvcfg;
1882 return RISCV_EXCP_NONE;
1883 }
1884
1885 static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
1886 target_ulong val)
1887 {
1888 RISCVCPUConfig *cfg = &env_archcpu(env)->cfg;
1889 uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
1890
1891 if (riscv_cpu_mxl(env) == MXL_RV64) {
1892 mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
1893 (cfg->ext_sstc ? MENVCFG_STCE : 0);
1894 }
1895 env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
1896
1897 return RISCV_EXCP_NONE;
1898 }
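/*
 * Example of the gating above, which is what ties menvcfg.PBMTE/STCE to
 * the Svpbmt/Sstc extensions: with cpu->cfg.ext_sstc = false, a write of
 * MENVCFG_STCE leaves the bit clear, so the sstc() predicate earlier in
 * this file keeps rejecting S-mode stimecmp accesses with an illegal
 * instruction exception.
 */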
1899
1900 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
1901 target_ulong *val)
1902 {
1903 *val = env->menvcfg >> 32;
1904 return RISCV_EXCP_NONE;
1905 }
1906
1907 static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
1908 target_ulong val)
1909 {
1910 RISCVCPUConfig *cfg = &env_archcpu(env)->cfg;
1911 uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) |
1912 (cfg->ext_sstc ? MENVCFG_STCE : 0);
1913 uint64_t valh = (uint64_t)val << 32;
1914
1915 env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
1916
1917 return RISCV_EXCP_NONE;
1918 }
1919
1920 static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
1921 target_ulong *val)
1922 {
1923 RISCVException ret;
1924
1925 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1926 if (ret != RISCV_EXCP_NONE) {
1927 return ret;
1928 }
1929
1930 *val = env->senvcfg;
1931 return RISCV_EXCP_NONE;
1932 }
1933
1934 static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
1935 target_ulong val)
1936 {
1937 uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
1938 RISCVException ret;
1939
1940 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1941 if (ret != RISCV_EXCP_NONE) {
1942 return ret;
1943 }
1944
1945 env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
1946 return RISCV_EXCP_NONE;
1947 }
1948
1949 static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
1950 target_ulong *val)
1951 {
1952 RISCVException ret;
1953
1954 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1955 if (ret != RISCV_EXCP_NONE) {
1956 return ret;
1957 }
1958
1959 *val = env->henvcfg;
1960 return RISCV_EXCP_NONE;
1961 }
1962
1963 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
1964 target_ulong val)
1965 {
1966 uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
1967 RISCVException ret;
1968
1969 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1970 if (ret != RISCV_EXCP_NONE) {
1971 return ret;
1972 }
1973
1974 if (riscv_cpu_mxl(env) == MXL_RV64) {
1975 mask |= HENVCFG_PBMTE | HENVCFG_STCE;
1976 }
1977
1978 env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
1979
1980 return RISCV_EXCP_NONE;
1981 }
1982
1983 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
1984 target_ulong *val)
1985 {
1986 RISCVException ret;
1987
1988 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1989 if (ret != RISCV_EXCP_NONE) {
1990 return ret;
1991 }
1992
1993 *val = env->henvcfg >> 32;
1994 return RISCV_EXCP_NONE;
1995 }
1996
1997 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
1998 target_ulong val)
1999 {
2000 uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE;
2001 uint64_t valh = (uint64_t)val << 32;
2002 RISCVException ret;
2003
2004 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2005 if (ret != RISCV_EXCP_NONE) {
2006 return ret;
2007 }
2008
2009 env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);
2010 return RISCV_EXCP_NONE;
2011 }
2012
2013 static RISCVException read_mstateen(CPURISCVState *env, int csrno,
2014 target_ulong *val)
2015 {
2016 *val = env->mstateen[csrno - CSR_MSTATEEN0];
2017
2018 return RISCV_EXCP_NONE;
2019 }
2020
2021 static RISCVException write_mstateen(CPURISCVState *env, int csrno,
2022 uint64_t wr_mask, target_ulong new_val)
2023 {
2024 uint64_t *reg;
2025
2026 reg = &env->mstateen[csrno - CSR_MSTATEEN0];
2027 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2028
2029 return RISCV_EXCP_NONE;
2030 }
2031
2032 static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
2033 target_ulong new_val)
2034 {
2035 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2036
2037 return write_mstateen(env, csrno, wr_mask, new_val);
2038 }
2039
2040 static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
2041 target_ulong new_val)
2042 {
2043 return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2044 }
2045
2046 static RISCVException read_mstateenh(CPURISCVState *env, int csrno,
2047 target_ulong *val)
2048 {
2049 *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32;
2050
2051 return RISCV_EXCP_NONE;
2052 }
2053
2054 static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
2055 uint64_t wr_mask, target_ulong new_val)
2056 {
2057 uint64_t *reg, val;
2058
2059 reg = &env->mstateen[csrno - CSR_MSTATEEN0H];
2060 val = (uint64_t)new_val << 32;
2061 val |= *reg & 0xFFFFFFFF;
2062 *reg = (*reg & ~wr_mask) | (val & wr_mask);
2063
2064 return RISCV_EXCP_NONE;
2065 }
2066
2067 static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
2068 target_ulong new_val)
2069 {
2070 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2071
2072 return write_mstateenh(env, csrno, wr_mask, new_val);
2073 }
2074
2075 static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
2076 target_ulong new_val)
2077 {
2078 return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2079 }
2080
2081 static RISCVException read_hstateen(CPURISCVState *env, int csrno,
2082 target_ulong *val)
2083 {
2084 int index = csrno - CSR_HSTATEEN0;
2085
2086 *val = env->hstateen[index] & env->mstateen[index];
2087
2088 return RISCV_EXCP_NONE;
2089 }
2090
2091 static RISCVException write_hstateen(CPURISCVState *env, int csrno,
2092 uint64_t mask, target_ulong new_val)
2093 {
2094 int index = csrno - CSR_HSTATEEN0;
2095 uint64_t *reg, wr_mask;
2096
2097 reg = &env->hstateen[index];
2098 wr_mask = env->mstateen[index] & mask;
2099 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2100
2101 return RISCV_EXCP_NONE;
2102 }
2103
2104 static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
2105 target_ulong new_val)
2106 {
2107 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2108
2109 return write_hstateen(env, csrno, wr_mask, new_val);
2110 }
2111
2112 static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
2113 target_ulong new_val)
2114 {
2115 return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2116 }
2117
2118 static RISCVException read_hstateenh(CPURISCVState *env, int csrno,
2119 target_ulong *val)
2120 {
2121 int index = csrno - CSR_HSTATEEN0H;
2122
2123 *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32);
2124
2125 return RISCV_EXCP_NONE;
2126 }
2127
2128 static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
2129 uint64_t mask, target_ulong new_val)
2130 {
2131 int index = csrno - CSR_HSTATEEN0H;
2132 uint64_t *reg, wr_mask, val;
2133
2134 reg = &env->hstateen[index];
2135 val = (uint64_t)new_val << 32;
2136 val |= *reg & 0xFFFFFFFF;
2137 wr_mask = env->mstateen[index] & mask;
2138 *reg = (*reg & ~wr_mask) | (val & wr_mask);
2139
2140 return RISCV_EXCP_NONE;
2141 }
2142
2143 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
2144 target_ulong new_val)
2145 {
2146 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2147
2148 return write_hstateenh(env, csrno, wr_mask, new_val);
2149 }
2150
2151 static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
2152 target_ulong new_val)
2153 {
2154 return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2155 }
2156
2157 static RISCVException read_sstateen(CPURISCVState *env, int csrno,
2158 target_ulong *val)
2159 {
2160 bool virt = riscv_cpu_virt_enabled(env);
2161 int index = csrno - CSR_SSTATEEN0;
2162
2163 *val = env->sstateen[index] & env->mstateen[index];
2164 if (virt) {
2165 *val &= env->hstateen[index];
2166 }
2167
2168 return RISCV_EXCP_NONE;
2169 }
2170
2171 static RISCVException write_sstateen(CPURISCVState *env, int csrno,
2172 uint64_t mask, target_ulong new_val)
2173 {
2174 bool virt = riscv_cpu_virt_enabled(env);
2175 int index = csrno - CSR_SSTATEEN0;
2176 uint64_t wr_mask;
2177 uint64_t *reg;
2178
2179 wr_mask = env->mstateen[index] & mask;
2180 if (virt) {
2181 wr_mask &= env->hstateen[index];
2182 }
2183
2184 reg = &env->sstateen[index];
2185 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2186
2187 return RISCV_EXCP_NONE;
2188 }
2189
2190 static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
2191 target_ulong new_val)
2192 {
2193 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2194
2195 return write_sstateen(env, csrno, wr_mask, new_val);
2196 }
2197
2198 static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
2199 target_ulong new_val)
2200 {
2201 return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2202 }
2203
2204 static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
2205 uint64_t *ret_val,
2206 uint64_t new_val, uint64_t wr_mask)
2207 {
2208 RISCVCPU *cpu = env_archcpu(env);
2209 uint64_t old_mip, mask = wr_mask & delegable_ints;
2210 uint32_t gin;
2211
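/*
 * SEIP may be pended both by software writes to mip and by an external
 * interrupt controller.  The software-injected value is tracked separately
 * and the externally driven SEIP is OR-ed back in, so clearing the bit in
 * software cannot hide a still-pending external interrupt.
 */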
2212 if (mask & MIP_SEIP) {
2213 env->software_seip = new_val & MIP_SEIP;
2214 new_val |= env->external_seip * MIP_SEIP;
2215 }
2216
2217 if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
2218 get_field(env->menvcfg, MENVCFG_STCE)) {
2219 /* the sstc extension forbids STIP & VSTIP from being writable in mip */
2220 mask = mask & ~(MIP_STIP | MIP_VSTIP);
2221 }
2222
2223 if (mask) {
2224 old_mip = riscv_cpu_update_mip(cpu, mask, (new_val & mask));
2225 } else {
2226 old_mip = env->mip;
2227 }
2228
2229 if (csrno != CSR_HVIP) {
2230 gin = get_field(env->hstatus, HSTATUS_VGEIN);
2231 old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
2232 old_mip |= env->vstime_irq ? MIP_VSTIP : 0;
2233 }
2234
2235 if (ret_val) {
2236 *ret_val = old_mip;
2237 }
2238
2239 return RISCV_EXCP_NONE;
2240 }
2241
2242 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
2243 target_ulong *ret_val,
2244 target_ulong new_val, target_ulong wr_mask)
2245 {
2246 uint64_t rval;
2247 RISCVException ret;
2248
2249 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
2250 if (ret_val) {
2251 *ret_val = rval;
2252 }
2253
2254 return ret;
2255 }
2256
2257 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
2258 target_ulong *ret_val,
2259 target_ulong new_val, target_ulong wr_mask)
2260 {
2261 uint64_t rval;
2262 RISCVException ret;
2263
2264 ret = rmw_mip64(env, csrno, &rval,
2265 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2266 if (ret_val) {
2267 *ret_val = rval >> 32;
2268 }
2269
2270 return ret;
2271 }
2272
2273 /* Supervisor Trap Setup */
2274 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
2275 Int128 *val)
2276 {
2277 uint64_t mask = sstatus_v1_10_mask;
2278 uint64_t sstatus = env->mstatus & mask;
2279 if (env->xl != MXL_RV32 || env->debugger) {
2280 mask |= SSTATUS64_UXL;
2281 }
2282
2283 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
2284 return RISCV_EXCP_NONE;
2285 }
2286
2287 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
2288 target_ulong *val)
2289 {
2290 target_ulong mask = (sstatus_v1_10_mask);
2291 if (env->xl != MXL_RV32 || env->debugger) {
2292 mask |= SSTATUS64_UXL;
2293 }
2294 /* TODO: Use SXL not MXL. */
2295 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
2296 return RISCV_EXCP_NONE;
2297 }
2298
2299 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
2300 target_ulong val)
2301 {
2302 target_ulong mask = (sstatus_v1_10_mask);
2303
2304 if (env->xl != MXL_RV32 || env->debugger) {
2305 if ((val & SSTATUS64_UXL) != 0) {
2306 mask |= SSTATUS64_UXL;
2307 }
2308 }
2309 target_ulong newval = (env->mstatus & ~mask) | (val & mask);
2310 return write_mstatus(env, CSR_MSTATUS, newval);
2311 }
2312
2313 static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
2314 uint64_t *ret_val,
2315 uint64_t new_val, uint64_t wr_mask)
2316 {
2317 RISCVException ret;
2318 uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
2319
2320 /* Bring VS-level bits to correct position */
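/*
 * VS-level interrupt bits sit one position above their S-level counterparts
 * in mip/mie (e.g. VSSIP is bit 2 while SSIP is bit 1), hence the shift by
 * one when mapping vsie onto mie.
 */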
2321 new_val = (new_val & (VS_MODE_INTERRUPTS >> 1)) << 1;
2322 wr_mask = (wr_mask & (VS_MODE_INTERRUPTS >> 1)) << 1;
2323
2324 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask);
2325 if (ret_val) {
2326 *ret_val = (rval & mask) >> 1;
2327 }
2328
2329 return ret;
2330 }
2331
2332 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
2333 target_ulong *ret_val,
2334 target_ulong new_val, target_ulong wr_mask)
2335 {
2336 uint64_t rval;
2337 RISCVException ret;
2338
2339 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
2340 if (ret_val) {
2341 *ret_val = rval;
2342 }
2343
2344 return ret;
2345 }
2346
2347 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
2348 target_ulong *ret_val,
2349 target_ulong new_val, target_ulong wr_mask)
2350 {
2351 uint64_t rval;
2352 RISCVException ret;
2353
2354 ret = rmw_vsie64(env, csrno, &rval,
2355 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2356 if (ret_val) {
2357 *ret_val = rval >> 32;
2358 }
2359
2360 return ret;
2361 }
2362
2363 static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
2364 uint64_t *ret_val,
2365 uint64_t new_val, uint64_t wr_mask)
2366 {
2367 RISCVException ret;
2368 uint64_t mask = env->mideleg & S_MODE_INTERRUPTS;
2369
2370 if (riscv_cpu_virt_enabled(env)) {
2371 if (env->hvictl & HVICTL_VTI) {
2372 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
2373 }
2374 ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
2375 } else {
2376 ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask);
2377 }
2378
2379 if (ret_val) {
2380 *ret_val &= mask;
2381 }
2382
2383 return ret;
2384 }
2385
2386 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
2387 target_ulong *ret_val,
2388 target_ulong new_val, target_ulong wr_mask)
2389 {
2390 uint64_t rval;
2391 RISCVException ret;
2392
2393 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
2394 if (ret == RISCV_EXCP_NONE && ret_val) {
2395 *ret_val = rval;
2396 }
2397
2398 return ret;
2399 }
2400
2401 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
2402 target_ulong *ret_val,
2403 target_ulong new_val, target_ulong wr_mask)
2404 {
2405 uint64_t rval;
2406 RISCVException ret;
2407
2408 ret = rmw_sie64(env, csrno, &rval,
2409 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2410 if (ret_val) {
2411 *ret_val = rval >> 32;
2412 }
2413
2414 return ret;
2415 }
2416
2417 static RISCVException read_stvec(CPURISCVState *env, int csrno,
2418 target_ulong *val)
2419 {
2420 *val = env->stvec;
2421 return RISCV_EXCP_NONE;
2422 }
2423
2424 static RISCVException write_stvec(CPURISCVState *env, int csrno,
2425 target_ulong val)
2426 {
2427 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, values >= 2 are reserved */
2428 if ((val & 3) < 2) {
2429 env->stvec = val;
2430 } else {
2431 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
2432 }
2433 return RISCV_EXCP_NONE;
2434 }
2435
2436 static RISCVException read_scounteren(CPURISCVState *env, int csrno,
2437 target_ulong *val)
2438 {
2439 *val = env->scounteren;
2440 return RISCV_EXCP_NONE;
2441 }
2442
2443 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
2444 target_ulong val)
2445 {
2446 env->scounteren = val;
2447 return RISCV_EXCP_NONE;
2448 }
2449
2450 /* Supervisor Trap Handling */
2451 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
2452 Int128 *val)
2453 {
2454 *val = int128_make128(env->sscratch, env->sscratchh);
2455 return RISCV_EXCP_NONE;
2456 }
2457
2458 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
2459 Int128 val)
2460 {
2461 env->sscratch = int128_getlo(val);
2462 env->sscratchh = int128_gethi(val);
2463 return RISCV_EXCP_NONE;
2464 }
2465
2466 static RISCVException read_sscratch(CPURISCVState *env, int csrno,
2467 target_ulong *val)
2468 {
2469 *val = env->sscratch;
2470 return RISCV_EXCP_NONE;
2471 }
2472
2473 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
2474 target_ulong val)
2475 {
2476 env->sscratch = val;
2477 return RISCV_EXCP_NONE;
2478 }
2479
2480 static RISCVException read_sepc(CPURISCVState *env, int csrno,
2481 target_ulong *val)
2482 {
2483 *val = env->sepc;
2484 return RISCV_EXCP_NONE;
2485 }
2486
2487 static RISCVException write_sepc(CPURISCVState *env, int csrno,
2488 target_ulong val)
2489 {
2490 env->sepc = val;
2491 return RISCV_EXCP_NONE;
2492 }
2493
2494 static RISCVException read_scause(CPURISCVState *env, int csrno,
2495 target_ulong *val)
2496 {
2497 *val = env->scause;
2498 return RISCV_EXCP_NONE;
2499 }
2500
2501 static RISCVException write_scause(CPURISCVState *env, int csrno,
2502 target_ulong val)
2503 {
2504 env->scause = val;
2505 return RISCV_EXCP_NONE;
2506 }
2507
2508 static RISCVException read_stval(CPURISCVState *env, int csrno,
2509 target_ulong *val)
2510 {
2511 *val = env->stval;
2512 return RISCV_EXCP_NONE;
2513 }
2514
2515 static RISCVException write_stval(CPURISCVState *env, int csrno,
2516 target_ulong val)
2517 {
2518 env->stval = val;
2519 return RISCV_EXCP_NONE;
2520 }
2521
2522 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
2523 uint64_t *ret_val,
2524 uint64_t new_val, uint64_t wr_mask)
2525 {
2526 RISCVException ret;
2527 uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
2528
2529 /* Bring VS-level bits to correct position */
2530 new_val = (new_val & (VS_MODE_INTERRUPTS >> 1)) << 1;
2531 wr_mask = (wr_mask & (VS_MODE_INTERRUPTS >> 1)) << 1;
2532
2533 ret = rmw_mip64(env, csrno, &rval, new_val,
2534 wr_mask & mask & vsip_writable_mask);
2535 if (ret_val) {
2536 *ret_val = (rval & mask) >> 1;
2537 }
2538
2539 return ret;
2540 }
2541
2542 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
2543 target_ulong *ret_val,
2544 target_ulong new_val, target_ulong wr_mask)
2545 {
2546 uint64_t rval;
2547 RISCVException ret;
2548
2549 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
2550 if (ret_val) {
2551 *ret_val = rval;
2552 }
2553
2554 return ret;
2555 }
2556
2557 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
2558 target_ulong *ret_val,
2559 target_ulong new_val, target_ulong wr_mask)
2560 {
2561 uint64_t rval;
2562 RISCVException ret;
2563
2564 ret = rmw_vsip64(env, csrno, &rval,
2565 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2566 if (ret_val) {
2567 *ret_val = rval >> 32;
2568 }
2569
2570 return ret;
2571 }
2572
2573 static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
2574 uint64_t *ret_val,
2575 uint64_t new_val, uint64_t wr_mask)
2576 {
2577 RISCVException ret;
2578 uint64_t mask = env->mideleg & sip_writable_mask;
2579
2580 if (riscv_cpu_virt_enabled(env)) {
2581 if (env->hvictl & HVICTL_VTI) {
2582 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
2583 }
2584 ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
2585 } else {
2586 ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask);
2587 }
2588
2589 if (ret_val) {
2590 *ret_val &= env->mideleg & S_MODE_INTERRUPTS;
2591 }
2592
2593 return ret;
2594 }
2595
2596 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
2597 target_ulong *ret_val,
2598 target_ulong new_val, target_ulong wr_mask)
2599 {
2600 uint64_t rval;
2601 RISCVException ret;
2602
2603 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
2604 if (ret_val) {
2605 *ret_val = rval;
2606 }
2607
2608 return ret;
2609 }
2610
2611 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
2612 target_ulong *ret_val,
2613 target_ulong new_val, target_ulong wr_mask)
2614 {
2615 uint64_t rval;
2616 RISCVException ret;
2617
2618 ret = rmw_sip64(env, csrno, &rval,
2619 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2620 if (ret_val) {
2621 *ret_val = rval >> 32;
2622 }
2623
2624 return ret;
2625 }
2626
2627 /* Supervisor Protection and Translation */
2628 static RISCVException read_satp(CPURISCVState *env, int csrno,
2629 target_ulong *val)
2630 {
2631 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
2632 *val = 0;
2633 return RISCV_EXCP_NONE;
2634 }
2635
2636 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
2637 return RISCV_EXCP_ILLEGAL_INST;
2638 } else {
2639 *val = env->satp;
2640 }
2641
2642 return RISCV_EXCP_NONE;
2643 }
2644
2645 static RISCVException write_satp(CPURISCVState *env, int csrno,
2646 target_ulong val)
2647 {
2648 target_ulong vm, mask;
2649
2650 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
2651 return RISCV_EXCP_NONE;
2652 }
2653
2654 if (riscv_cpu_mxl(env) == MXL_RV32) {
2655 vm = validate_vm(env, get_field(val, SATP32_MODE));
2656 mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
2657 } else {
2658 vm = validate_vm(env, get_field(val, SATP64_MODE));
2659 mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
2660 }
2661
2662 if (vm && mask) {
2663 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
2664 return RISCV_EXCP_ILLEGAL_INST;
2665 } else {
2666 /*
2667 * The ISA defines SATP.MODE=Bare as "no translation", but we still
2668 * pass these through QEMU's TLB emulation as it improves
2669 * performance. Flushing the TLB on SATP writes with paging
2670 * enabled avoids leaking those invalid cached mappings.
2671 */
2672 tlb_flush(env_cpu(env));
2673 env->satp = val;
2674 }
2675 }
2676 return RISCV_EXCP_NONE;
2677 }
2678
2679 static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val)
2680 {
2681 int irq, ret;
2682 target_ulong topei;
2683 uint64_t vseip, vsgein;
2684 uint32_t iid, iprio, hviid, hviprio, gein;
2685 uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];
2686
2687 gein = get_field(env->hstatus, HSTATUS_VGEIN);
2688 hviid = get_field(env->hvictl, HVICTL_IID);
2689 hviprio = get_field(env->hvictl, HVICTL_IPRIO);
2690
2691 if (gein) {
2692 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
2693 vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
2694 if (gein <= env->geilen && vseip) {
2695 siid[scount] = IRQ_S_EXT;
2696 siprio[scount] = IPRIO_MMAXIPRIO + 1;
2697 if (env->aia_ireg_rmw_fn[PRV_S]) {
2698 /*
2699 * Call machine specific IMSIC register emulation for
2700 * reading TOPEI.
2701 */
2702 ret = env->aia_ireg_rmw_fn[PRV_S](
2703 env->aia_ireg_rmw_fn_arg[PRV_S],
2704 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
2705 riscv_cpu_mxl_bits(env)),
2706 &topei, 0, 0);
2707 if (!ret && topei) {
2708 siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
2709 }
2710 }
2711 scount++;
2712 }
2713 } else {
2714 if (hviid == IRQ_S_EXT && hviprio) {
2715 siid[scount] = IRQ_S_EXT;
2716 siprio[scount] = hviprio;
2717 scount++;
2718 }
2719 }
2720
2721 if (env->hvictl & HVICTL_VTI) {
2722 if (hviid != IRQ_S_EXT) {
2723 siid[scount] = hviid;
2724 siprio[scount] = hviprio;
2725 scount++;
2726 }
2727 } else {
2728 irq = riscv_cpu_vsirq_pending(env);
2729 if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
2730 siid[scount] = irq;
2731 siprio[scount] = env->hviprio[irq];
2732 scount++;
2733 }
2734 }
2735
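/*
 * Pick the winning candidate: the lowest siprio value wins, as a lower
 * priority number denotes a higher interrupt priority.
 */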
2736 iid = 0;
2737 iprio = UINT_MAX;
2738 for (s = 0; s < scount; s++) {
2739 if (siprio[s] < iprio) {
2740 iid = siid[s];
2741 iprio = siprio[s];
2742 }
2743 }
2744
2745 if (iid) {
2746 if (env->hvictl & HVICTL_IPRIOM) {
2747 if (iprio > IPRIO_MMAXIPRIO) {
2748 iprio = IPRIO_MMAXIPRIO;
2749 }
2750 if (!iprio) {
2751 if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
2752 iprio = IPRIO_MMAXIPRIO;
2753 }
2754 }
2755 } else {
2756 iprio = 1;
2757 }
2758 } else {
2759 iprio = 0;
2760 }
2761
2762 *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
2763 *val |= iprio;
2764 return RISCV_EXCP_NONE;
2765 }
2766
2767 static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val)
2768 {
2769 int irq;
2770 uint8_t iprio;
2771
2772 if (riscv_cpu_virt_enabled(env)) {
2773 return read_vstopi(env, CSR_VSTOPI, val);
2774 }
2775
2776 irq = riscv_cpu_sirq_pending(env);
2777 if (irq <= 0 || irq > 63) {
2778 *val = 0;
2779 } else {
2780 iprio = env->siprio[irq];
2781 if (!iprio) {
2782 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
2783 iprio = IPRIO_MMAXIPRIO;
2784 }
2785 }
2786 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
2787 *val |= iprio;
2788 }
2789
2790 return RISCV_EXCP_NONE;
2791 }
2792
2793 /* Hypervisor Extensions */
2794 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
2795 target_ulong *val)
2796 {
2797 *val = env->hstatus;
2798 if (riscv_cpu_mxl(env) != MXL_RV32) {
2799 /* We only support 64-bit VSXL */
2800 *val = set_field(*val, HSTATUS_VSXL, 2);
2801 }
2802 /* We only support little endian */
2803 *val = set_field(*val, HSTATUS_VSBE, 0);
2804 return RISCV_EXCP_NONE;
2805 }
2806
2807 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
2808 target_ulong val)
2809 {
2810 env->hstatus = val;
2811 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
2812 qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options.");
2813 }
2814 if (get_field(val, HSTATUS_VSBE) != 0) {
2815 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
2816 }
2817 return RISCV_EXCP_NONE;
2818 }
2819
2820 static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
2821 target_ulong *val)
2822 {
2823 *val = env->hedeleg;
2824 return RISCV_EXCP_NONE;
2825 }
2826
2827 static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
2828 target_ulong val)
2829 {
2830 env->hedeleg = val & vs_delegable_excps;
2831 return RISCV_EXCP_NONE;
2832 }
2833
2834 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
2835 uint64_t *ret_val,
2836 uint64_t new_val, uint64_t wr_mask)
2837 {
2838 uint64_t mask = wr_mask & vs_delegable_ints;
2839
2840 if (ret_val) {
2841 *ret_val = env->hideleg & vs_delegable_ints;
2842 }
2843
2844 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
2845 return RISCV_EXCP_NONE;
2846 }
2847
2848 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
2849 target_ulong *ret_val,
2850 target_ulong new_val, target_ulong wr_mask)
2851 {
2852 uint64_t rval;
2853 RISCVException ret;
2854
2855 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
2856 if (ret_val) {
2857 *ret_val = rval;
2858 }
2859
2860 return ret;
2861 }
2862
2863 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
2864 target_ulong *ret_val,
2865 target_ulong new_val, target_ulong wr_mask)
2866 {
2867 uint64_t rval;
2868 RISCVException ret;
2869
2870 ret = rmw_hideleg64(env, csrno, &rval,
2871 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2872 if (ret_val) {
2873 *ret_val = rval >> 32;
2874 }
2875
2876 return ret;
2877 }
2878
2879 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
2880 uint64_t *ret_val,
2881 uint64_t new_val, uint64_t wr_mask)
2882 {
2883 RISCVException ret;
2884
2885 ret = rmw_mip64(env, csrno, ret_val, new_val,
2886 wr_mask & hvip_writable_mask);
2887 if (ret_val) {
2888 *ret_val &= VS_MODE_INTERRUPTS;
2889 }
2890
2891 return ret;
2892 }
2893
2894 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
2895 target_ulong *ret_val,
2896 target_ulong new_val, target_ulong wr_mask)
2897 {
2898 uint64_t rval;
2899 RISCVException ret;
2900
2901 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
2902 if (ret_val) {
2903 *ret_val = rval;
2904 }
2905
2906 return ret;
2907 }
2908
2909 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
2910 target_ulong *ret_val,
2911 target_ulong new_val, target_ulong wr_mask)
2912 {
2913 uint64_t rval;
2914 RISCVException ret;
2915
2916 ret = rmw_hvip64(env, csrno, &rval,
2917 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2918 if (ret_val) {
2919 *ret_val = rval >> 32;
2920 }
2921
2922 return ret;
2923 }
2924
2925 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
2926 target_ulong *ret_value,
2927 target_ulong new_value, target_ulong write_mask)
2928 {
2929 int ret = rmw_mip(env, csrno, ret_value, new_value,
2930 write_mask & hip_writable_mask);
2931
2932 if (ret_value) {
2933 *ret_value &= HS_MODE_INTERRUPTS;
2934 }
2935 return ret;
2936 }
2937
2938 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
2939 target_ulong *ret_val,
2940 target_ulong new_val, target_ulong wr_mask)
2941 {
2942 uint64_t rval;
2943 RISCVException ret;
2944
2945 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
2946 if (ret_val) {
2947 *ret_val = rval & HS_MODE_INTERRUPTS;
2948 }
2949
2950 return ret;
2951 }
2952
2953 static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
2954 target_ulong *val)
2955 {
2956 *val = env->hcounteren;
2957 return RISCV_EXCP_NONE;
2958 }
2959
2960 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
2961 target_ulong val)
2962 {
2963 env->hcounteren = val;
2964 return RISCV_EXCP_NONE;
2965 }
2966
2967 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
2968 target_ulong *val)
2969 {
2970 if (val) {
2971 *val = env->hgeie;
2972 }
2973 return RISCV_EXCP_NONE;
2974 }
2975
2976 static RISCVException write_hgeie(CPURISCVState *env, int csrno,
2977 target_ulong val)
2978 {
2979 /* Only bits GEILEN:1 are implemented; bit 0 is never implemented */
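/* e.g. with geilen == 2, only bits 2:1 remain writable (mask 0b110) */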
2980 val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
2981 env->hgeie = val;
2982 /* Update mip.SGEIP bit */
2983 riscv_cpu_update_mip(env_archcpu(env), MIP_SGEIP,
2984 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
2985 return RISCV_EXCP_NONE;
2986 }
2987
2988 static RISCVException read_htval(CPURISCVState *env, int csrno,
2989 target_ulong *val)
2990 {
2991 *val = env->htval;
2992 return RISCV_EXCP_NONE;
2993 }
2994
2995 static RISCVException write_htval(CPURISCVState *env, int csrno,
2996 target_ulong val)
2997 {
2998 env->htval = val;
2999 return RISCV_EXCP_NONE;
3000 }
3001
3002 static RISCVException read_htinst(CPURISCVState *env, int csrno,
3003 target_ulong *val)
3004 {
3005 *val = env->htinst;
3006 return RISCV_EXCP_NONE;
3007 }
3008
3009 static RISCVException write_htinst(CPURISCVState *env, int csrno,
3010 target_ulong val)
3011 {
3012 return RISCV_EXCP_NONE;
3013 }
3014
3015 static RISCVException read_hgeip(CPURISCVState *env, int csrno,
3016 target_ulong *val)
3017 {
3018 if (val) {
3019 *val = env->hgeip;
3020 }
3021 return RISCV_EXCP_NONE;
3022 }
3023
3024 static RISCVException read_hgatp(CPURISCVState *env, int csrno,
3025 target_ulong *val)
3026 {
3027 *val = env->hgatp;
3028 return RISCV_EXCP_NONE;
3029 }
3030
3031 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
3032 target_ulong val)
3033 {
3034 env->hgatp = val;
3035 return RISCV_EXCP_NONE;
3036 }
3037
3038 static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
3039 target_ulong *val)
3040 {
3041 if (!env->rdtime_fn) {
3042 return RISCV_EXCP_ILLEGAL_INST;
3043 }
3044
3045 *val = env->htimedelta;
3046 return RISCV_EXCP_NONE;
3047 }
3048
3049 static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
3050 target_ulong val)
3051 {
3052 RISCVCPU *cpu = env_archcpu(env);
3053
3054 if (!env->rdtime_fn) {
3055 return RISCV_EXCP_ILLEGAL_INST;
3056 }
3057
3058 if (riscv_cpu_mxl(env) == MXL_RV32) {
3059 env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
3060 } else {
3061 env->htimedelta = val;
3062 }
3063
3064 if (cpu->cfg.ext_sstc && env->rdtime_fn) {
3065 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
3066 env->htimedelta, MIP_VSTIP);
3067 }
3068
3069 return RISCV_EXCP_NONE;
3070 }
3071
3072 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
3073 target_ulong *val)
3074 {
3075 if (!env->rdtime_fn) {
3076 return RISCV_EXCP_ILLEGAL_INST;
3077 }
3078
3079 *val = env->htimedelta >> 32;
3080 return RISCV_EXCP_NONE;
3081 }
3082
3083 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
3084 target_ulong val)
3085 {
3086 RISCVCPU *cpu = env_archcpu(env);
3087
3088 if (!env->rdtime_fn) {
3089 return RISCV_EXCP_ILLEGAL_INST;
3090 }
3091
3092 env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
3093
3094 if (cpu->cfg.ext_sstc && env->rdtime_fn) {
3095 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
3096 env->htimedelta, MIP_VSTIP);
3097 }
3098
3099 return RISCV_EXCP_NONE;
3100 }
3101
3102 static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val)
3103 {
3104 *val = env->hvictl;
3105 return RISCV_EXCP_NONE;
3106 }
3107
3108 static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val)
3109 {
3110 env->hvictl = val & HVICTL_VALID_MASK;
3111 return RISCV_EXCP_NONE;
3112 }
3113
3114 static int read_hvipriox(CPURISCVState *env, int first_index,
3115 uint8_t *iprio, target_ulong *val)
3116 {
3117 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
3118
3119 /* The first index has to be a multiple of the number of irqs per register */
3120 if (first_index % num_irqs) {
3121 return (riscv_cpu_virt_enabled(env)) ?
3122 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
3123 }
3124
3125 /* Fill up the return value */
3126 *val = 0;
3127 for (i = 0; i < num_irqs; i++) {
3128 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
3129 continue;
3130 }
3131 if (rdzero) {
3132 continue;
3133 }
3134 *val |= ((target_ulong)iprio[irq]) << (i * 8);
3135 }
3136
3137 return RISCV_EXCP_NONE;
3138 }
3139
3140 static int write_hvipriox(CPURISCVState *env, int first_index,
3141 uint8_t *iprio, target_ulong val)
3142 {
3143 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
3144
3145 /* The first index has to be a multiple of the number of irqs per register */
3146 if (first_index % num_irqs) {
3147 return (riscv_cpu_virt_enabled(env)) ?
3148 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
3149 }
3150
3151 /* Fill up the priority array */
3152 for (i = 0; i < num_irqs; i++) {
3153 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
3154 continue;
3155 }
3156 if (rdzero) {
3157 iprio[irq] = 0;
3158 } else {
3159 iprio[irq] = (val >> (i * 8)) & 0xff;
3160 }
3161 }
3162
3163 return RISCV_EXCP_NONE;
3164 }
3165
3166 static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val)
3167 {
3168 return read_hvipriox(env, 0, env->hviprio, val);
3169 }
3170
3171 static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val)
3172 {
3173 return write_hvipriox(env, 0, env->hviprio, val);
3174 }
3175
3176 static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val)
3177 {
3178 return read_hvipriox(env, 4, env->hviprio, val);
3179 }
3180
3181 static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val)
3182 {
3183 return write_hvipriox(env, 4, env->hviprio, val);
3184 }
3185
3186 static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val)
3187 {
3188 return read_hvipriox(env, 8, env->hviprio, val);
3189 }
3190
3191 static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val)
3192 {
3193 return write_hvipriox(env, 8, env->hviprio, val);
3194 }
3195
3196 static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val)
3197 {
3198 return read_hvipriox(env, 12, env->hviprio, val);
3199 }
3200
3201 static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val)
3202 {
3203 return write_hvipriox(env, 12, env->hviprio, val);
3204 }
3205
3206 /* Virtual CSR Registers */
3207 static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
3208 target_ulong *val)
3209 {
3210 *val = env->vsstatus;
3211 return RISCV_EXCP_NONE;
3212 }
3213
3214 static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
3215 target_ulong val)
3216 {
3217 uint64_t mask = (target_ulong)-1;
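/* Writing UXL as zero keeps the previous UXL value unchanged */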
3218 if ((val & VSSTATUS64_UXL) == 0) {
3219 mask &= ~VSSTATUS64_UXL;
3220 }
3221 env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
3222 return RISCV_EXCP_NONE;
3223 }
3224
3225 static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
3226 {
3227 *val = env->vstvec;
3228 return RISCV_EXCP_NONE;
3229 }
3230
3231 static RISCVException write_vstvec(CPURISCVState *env, int csrno,
3232 target_ulong val)
3233 {
3234 env->vstvec = val;
3235 return RISCV_EXCP_NONE;
3236 }
3237
3238 static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
3239 target_ulong *val)
3240 {
3241 *val = env->vsscratch;
3242 return RISCV_EXCP_NONE;
3243 }
3244
3245 static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
3246 target_ulong val)
3247 {
3248 env->vsscratch = val;
3249 return RISCV_EXCP_NONE;
3250 }
3251
3252 static RISCVException read_vsepc(CPURISCVState *env, int csrno,
3253 target_ulong *val)
3254 {
3255 *val = env->vsepc;
3256 return RISCV_EXCP_NONE;
3257 }
3258
3259 static RISCVException write_vsepc(CPURISCVState *env, int csrno,
3260 target_ulong val)
3261 {
3262 env->vsepc = val;
3263 return RISCV_EXCP_NONE;
3264 }
3265
3266 static RISCVException read_vscause(CPURISCVState *env, int csrno,
3267 target_ulong *val)
3268 {
3269 *val = env->vscause;
3270 return RISCV_EXCP_NONE;
3271 }
3272
3273 static RISCVException write_vscause(CPURISCVState *env, int csrno,
3274 target_ulong val)
3275 {
3276 env->vscause = val;
3277 return RISCV_EXCP_NONE;
3278 }
3279
3280 static RISCVException read_vstval(CPURISCVState *env, int csrno,
3281 target_ulong *val)
3282 {
3283 *val = env->vstval;
3284 return RISCV_EXCP_NONE;
3285 }
3286
3287 static RISCVException write_vstval(CPURISCVState *env, int csrno,
3288 target_ulong val)
3289 {
3290 env->vstval = val;
3291 return RISCV_EXCP_NONE;
3292 }
3293
3294 static RISCVException read_vsatp(CPURISCVState *env, int csrno,
3295 target_ulong *val)
3296 {
3297 *val = env->vsatp;
3298 return RISCV_EXCP_NONE;
3299 }
3300
3301 static RISCVException write_vsatp(CPURISCVState *env, int csrno,
3302 target_ulong val)
3303 {
3304 env->vsatp = val;
3305 return RISCV_EXCP_NONE;
3306 }
3307
3308 static RISCVException read_mtval2(CPURISCVState *env, int csrno,
3309 target_ulong *val)
3310 {
3311 *val = env->mtval2;
3312 return RISCV_EXCP_NONE;
3313 }
3314
3315 static RISCVException write_mtval2(CPURISCVState *env, int csrno,
3316 target_ulong val)
3317 {
3318 env->mtval2 = val;
3319 return RISCV_EXCP_NONE;
3320 }
3321
3322 static RISCVException read_mtinst(CPURISCVState *env, int csrno,
3323 target_ulong *val)
3324 {
3325 *val = env->mtinst;
3326 return RISCV_EXCP_NONE;
3327 }
3328
3329 static RISCVException write_mtinst(CPURISCVState *env, int csrno,
3330 target_ulong val)
3331 {
3332 env->mtinst = val;
3333 return RISCV_EXCP_NONE;
3334 }
3335
3336 /* Physical Memory Protection */
3337 static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
3338 target_ulong *val)
3339 {
3340 *val = mseccfg_csr_read(env);
3341 return RISCV_EXCP_NONE;
3342 }
3343
3344 static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
3345 target_ulong val)
3346 {
3347 mseccfg_csr_write(env, val);
3348 return RISCV_EXCP_NONE;
3349 }
3350
3351 static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index)
3352 {
3353 /* TODO: RV128 restriction check */
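/*
 * On RV64 only the even-numbered pmpcfg CSRs (pmpcfg0, pmpcfg2, ...) exist,
 * each holding the configuration of eight PMP entries, so odd indices are
 * rejected.
 */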
3354 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
3355 return false;
3356 }
3357 return true;
3358 }
3359
3360 static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
3361 target_ulong *val)
3362 {
3363 uint32_t reg_index = csrno - CSR_PMPCFG0;
3364
3365 if (!check_pmp_reg_index(env, reg_index)) {
3366 return RISCV_EXCP_ILLEGAL_INST;
3367 }
3368 *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
3369 return RISCV_EXCP_NONE;
3370 }
3371
3372 static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
3373 target_ulong val)
3374 {
3375 uint32_t reg_index = csrno - CSR_PMPCFG0;
3376
3377 if (!check_pmp_reg_index(env, reg_index)) {
3378 return RISCV_EXCP_ILLEGAL_INST;
3379 }
3380 pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val);
3381 return RISCV_EXCP_NONE;
3382 }
3383
3384 static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
3385 target_ulong *val)
3386 {
3387 *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
3388 return RISCV_EXCP_NONE;
3389 }
3390
3391 static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
3392 target_ulong val)
3393 {
3394 pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
3395 return RISCV_EXCP_NONE;
3396 }
3397
3398 static RISCVException read_tselect(CPURISCVState *env, int csrno,
3399 target_ulong *val)
3400 {
3401 *val = tselect_csr_read(env);
3402 return RISCV_EXCP_NONE;
3403 }
3404
3405 static RISCVException write_tselect(CPURISCVState *env, int csrno,
3406 target_ulong val)
3407 {
3408 tselect_csr_write(env, val);
3409 return RISCV_EXCP_NONE;
3410 }
3411
3412 static RISCVException read_tdata(CPURISCVState *env, int csrno,
3413 target_ulong *val)
3414 {
3415 /* return 0 in tdata1 to end the trigger enumeration */
3416 if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) {
3417 *val = 0;
3418 return RISCV_EXCP_NONE;
3419 }
3420
3421 if (!tdata_available(env, csrno - CSR_TDATA1)) {
3422 return RISCV_EXCP_ILLEGAL_INST;
3423 }
3424
3425 *val = tdata_csr_read(env, csrno - CSR_TDATA1);
3426 return RISCV_EXCP_NONE;
3427 }
3428
3429 static RISCVException write_tdata(CPURISCVState *env, int csrno,
3430 target_ulong val)
3431 {
3432 if (!tdata_available(env, csrno - CSR_TDATA1)) {
3433 return RISCV_EXCP_ILLEGAL_INST;
3434 }
3435
3436 tdata_csr_write(env, csrno - CSR_TDATA1, val);
3437 return RISCV_EXCP_NONE;
3438 }
3439
3440 static RISCVException read_tinfo(CPURISCVState *env, int csrno,
3441 target_ulong *val)
3442 {
3443 *val = tinfo_csr_read(env);
3444 return RISCV_EXCP_NONE;
3445 }
3446
3447 /*
3448 * Functions to access the Pointer Masking feature registers.
3449 * We have to check whether the current privilege level is allowed to
3450 * modify the CSR for the given mode.
3451 */
3452 static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
3453 {
3454 int csr_priv = get_field(csrno, 0x300);
3455 int pm_current;
3456
3457 if (env->debugger) {
3458 return false;
3459 }
3460 /*
3461 * If the privilege levels differ, we are accessing the CSR from a higher
3462 * privilege level, so allow the access.
3463 */
3464 if (env->priv != csr_priv) {
3465 return false;
3466 }
3467 switch (env->priv) {
3468 case PRV_M:
3469 pm_current = get_field(env->mmte, M_PM_CURRENT);
3470 break;
3471 case PRV_S:
3472 pm_current = get_field(env->mmte, S_PM_CURRENT);
3473 break;
3474 case PRV_U:
3475 pm_current = get_field(env->mmte, U_PM_CURRENT);
3476 break;
3477 default:
3478 g_assert_not_reached();
3479 }
3480 /* Same privilege level, so the CSR may be modified only if pm.current == 1 */
3481 return !pm_current;
3482 }
3483
3484 static RISCVException read_mmte(CPURISCVState *env, int csrno,
3485 target_ulong *val)
3486 {
3487 *val = env->mmte & MMTE_MASK;
3488 return RISCV_EXCP_NONE;
3489 }
3490
3491 static RISCVException write_mmte(CPURISCVState *env, int csrno,
3492 target_ulong val)
3493 {
3494 uint64_t mstatus;
3495 target_ulong wpri_val = val & MMTE_MASK;
3496
3497 if (val != wpri_val) {
3498 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
3499 "MMTE: WPRI violation written 0x", val,
3500 "vs expected 0x", wpri_val);
3501 }
3502 /* for machine mode pm.current is hardwired to 1 */
3503 wpri_val |= MMTE_M_PM_CURRENT;
3504
3505 /* hardwiring pm.instruction bit to 0, since it's not supported yet */
3506 wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
3507 env->mmte = wpri_val | PM_EXT_DIRTY;
3508 riscv_cpu_update_mask(env);
3509
3510 /* Set XS and SD bits, since PM CSRs are dirty */
3511 mstatus = env->mstatus | MSTATUS_XS;
3512 write_mstatus(env, csrno, mstatus);
3513 return RISCV_EXCP_NONE;
3514 }
3515
3516 static RISCVException read_smte(CPURISCVState *env, int csrno,
3517 target_ulong *val)
3518 {
3519 *val = env->mmte & SMTE_MASK;
3520 return RISCV_EXCP_NONE;
3521 }
3522
3523 static RISCVException write_smte(CPURISCVState *env, int csrno,
3524 target_ulong val)
3525 {
3526 target_ulong wpri_val = val & SMTE_MASK;
3527
3528 if (val != wpri_val) {
3529 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
3530 "SMTE: WPRI violation written 0x", val,
3531 "vs expected 0x", wpri_val);
3532 }
3533
3534 /* if pm.current==0 we can't modify current PM CSRs */
3535 if (check_pm_current_disabled(env, csrno)) {
3536 return RISCV_EXCP_NONE;
3537 }
3538
3539 wpri_val |= (env->mmte & ~SMTE_MASK);
3540 write_mmte(env, csrno, wpri_val);
3541 return RISCV_EXCP_NONE;
3542 }
3543
3544 static RISCVException read_umte(CPURISCVState *env, int csrno,
3545 target_ulong *val)
3546 {
3547 *val = env->mmte & UMTE_MASK;
3548 return RISCV_EXCP_NONE;
3549 }
3550
3551 static RISCVException write_umte(CPURISCVState *env, int csrno,
3552 target_ulong val)
3553 {
3554 target_ulong wpri_val = val & UMTE_MASK;
3555
3556 if (val != wpri_val) {
3557 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
3558 "UMTE: WPRI violation written 0x", val,
3559 "vs expected 0x", wpri_val);
3560 }
3561
3562 if (check_pm_current_disabled(env, csrno)) {
3563 return RISCV_EXCP_NONE;
3564 }
3565
3566 wpri_val |= (env->mmte & ~UMTE_MASK);
3567 write_mmte(env, csrno, wpri_val);
3568 return RISCV_EXCP_NONE;
3569 }
3570
3571 static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
3572 target_ulong *val)
3573 {
3574 *val = env->mpmmask;
3575 return RISCV_EXCP_NONE;
3576 }
3577
3578 static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
3579 target_ulong val)
3580 {
3581 uint64_t mstatus;
3582
3583 env->mpmmask = val;
3584 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
3585 env->cur_pmmask = val;
3586 }
3587 env->mmte |= PM_EXT_DIRTY;
3588
3589 /* Set XS and SD bits, since PM CSRs are dirty */
3590 mstatus = env->mstatus | MSTATUS_XS;
3591 write_mstatus(env, csrno, mstatus);
3592 return RISCV_EXCP_NONE;
3593 }
3594
3595 static RISCVException read_spmmask(CPURISCVState *env, int csrno,
3596 target_ulong *val)
3597 {
3598 *val = env->spmmask;
3599 return RISCV_EXCP_NONE;
3600 }
3601
3602 static RISCVException write_spmmask(CPURISCVState *env, int csrno,
3603 target_ulong val)
3604 {
3605 uint64_t mstatus;
3606
3607 /* if pm.current==0 we can't modify current PM CSRs */
3608 if (check_pm_current_disabled(env, csrno)) {
3609 return RISCV_EXCP_NONE;
3610 }
3611 env->spmmask = val;
3612 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
3613 env->cur_pmmask = val;
3614 }
3615 env->mmte |= PM_EXT_DIRTY;
3616
3617 /* Set XS and SD bits, since PM CSRs are dirty */
3618 mstatus = env->mstatus | MSTATUS_XS;
3619 write_mstatus(env, csrno, mstatus);
3620 return RISCV_EXCP_NONE;
3621 }
3622
3623 static RISCVException read_upmmask(CPURISCVState *env, int csrno,
3624 target_ulong *val)
3625 {
3626 *val = env->upmmask;
3627 return RISCV_EXCP_NONE;
3628 }
3629
3630 static RISCVException write_upmmask(CPURISCVState *env, int csrno,
3631 target_ulong val)
3632 {
3633 uint64_t mstatus;
3634
3635 /* if pm.current==0 we can't modify current PM CSRs */
3636 if (check_pm_current_disabled(env, csrno)) {
3637 return RISCV_EXCP_NONE;
3638 }
3639 env->upmmask = val;
3640 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
3641 env->cur_pmmask = val;
3642 }
3643 env->mmte |= PM_EXT_DIRTY;
3644
3645 /* Set XS and SD bits, since PM CSRs are dirty */
3646 mstatus = env->mstatus | MSTATUS_XS;
3647 write_mstatus(env, csrno, mstatus);
3648 return RISCV_EXCP_NONE;
3649 }
3650
3651 static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
3652 target_ulong *val)
3653 {
3654 *val = env->mpmbase;
3655 return RISCV_EXCP_NONE;
3656 }
3657
3658 static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
3659 target_ulong val)
3660 {
3661 uint64_t mstatus;
3662
3663 env->mpmbase = val;
3664 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
3665 env->cur_pmbase = val;
3666 }
3667 env->mmte |= PM_EXT_DIRTY;
3668
3669 /* Set XS and SD bits, since PM CSRs are dirty */
3670 mstatus = env->mstatus | MSTATUS_XS;
3671 write_mstatus(env, csrno, mstatus);
3672 return RISCV_EXCP_NONE;
3673 }
3674
3675 static RISCVException read_spmbase(CPURISCVState *env, int csrno,
3676 target_ulong *val)
3677 {
3678 *val = env->spmbase;
3679 return RISCV_EXCP_NONE;
3680 }
3681
3682 static RISCVException write_spmbase(CPURISCVState *env, int csrno,
3683 target_ulong val)
3684 {
3685 uint64_t mstatus;
3686
3687 /* if pm.current==0 we can't modify current PM CSRs */
3688 if (check_pm_current_disabled(env, csrno)) {
3689 return RISCV_EXCP_NONE;
3690 }
3691 env->spmbase = val;
3692 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
3693 env->cur_pmbase = val;
3694 }
3695 env->mmte |= PM_EXT_DIRTY;
3696
3697 /* Set XS and SD bits, since PM CSRs are dirty */
3698 mstatus = env->mstatus | MSTATUS_XS;
3699 write_mstatus(env, csrno, mstatus);
3700 return RISCV_EXCP_NONE;
3701 }
3702
3703 static RISCVException read_upmbase(CPURISCVState *env, int csrno,
3704 target_ulong *val)
3705 {
3706 *val = env->upmbase;
3707 return RISCV_EXCP_NONE;
3708 }
3709
3710 static RISCVException write_upmbase(CPURISCVState *env, int csrno,
3711 target_ulong val)
3712 {
3713 uint64_t mstatus;
3714
3715 /* if pm.current==0 we can't modify current PM CSRs */
3716 if (check_pm_current_disabled(env, csrno)) {
3717 return RISCV_EXCP_NONE;
3718 }
3719 env->upmbase = val;
3720 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
3721 env->cur_pmbase = val;
3722 }
3723 env->mmte |= PM_EXT_DIRTY;
3724
3725 /* Set XS and SD bits, since PM CSRs are dirty */
3726 mstatus = env->mstatus | MSTATUS_XS;
3727 write_mstatus(env, csrno, mstatus);
3728 return RISCV_EXCP_NONE;
3729 }
3730
3731 #endif
3732
3733 /* Crypto Extension */
3734 static RISCVException rmw_seed(CPURISCVState *env, int csrno,
3735 target_ulong *ret_value,
3736 target_ulong new_value,
3737 target_ulong write_mask)
3738 {
3739 uint16_t random_v;
3740 Error *random_e = NULL;
3741 int random_r;
3742 target_ulong rval;
3743
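/*
 * The Zkr seed CSR returns 16 bits of entropy in bits 15:0 along with the
 * OPST status field in bits 31:30, so two random bytes are requested from
 * the host.
 */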
3744 random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
3745 if (unlikely(random_r < 0)) {
3746 /*
3747 * Failed, for unknown reasons in the crypto subsystem.
3748 * The best we can do is log the reason and return a
3749 * failure indication to the guest. There is no reason
3750 * we know to expect the failure to be transitory, so
3751 * indicate DEAD to avoid having the guest spin on WAIT.
3752 */
3753 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
3754 __func__, error_get_pretty(random_e));
3755 error_free(random_e);
3756 rval = SEED_OPST_DEAD;
3757 } else {
3758 rval = random_v | SEED_OPST_ES16;
3759 }
3760
3761 if (ret_value) {
3762 *ret_value = rval;
3763 }
3764
3765 return RISCV_EXCP_NONE;
3766 }
3767
3768 /*
3769 * riscv_csrrw - read and/or update control and status register
3770 *
3771 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
3772 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
3773 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
3774 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
3775 */
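/*
 * The immediate forms (csrrwi/csrrsi/csrrci) map the same way with the
 * zero-extended immediate as value or mask; csrrs/csrrc with rs1 == x0
 * degenerate to a plain read (write_mask == 0).
 */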
3776
3777 static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
3778 int csrno,
3779 bool write_mask,
3780 RISCVCPU *cpu)
3781 {
3782 /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
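/*
 * In the 12-bit CSR address, bits [11:10] == 0b11 mark a read-only CSR and
 * bits [9:8] encode the lowest privilege level allowed to access it.
 */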
3783 int read_only = get_field(csrno, 0xC00) == 3;
3784 int csr_min_priv = csr_ops[csrno].min_priv_ver;
3785
3786 /* ensure the CSR extension is enabled. */
3787 if (!cpu->cfg.ext_icsr) {
3788 return RISCV_EXCP_ILLEGAL_INST;
3789 }
3790
3791 if (env->priv_ver < csr_min_priv) {
3792 return RISCV_EXCP_ILLEGAL_INST;
3793 }
3794
3795 /* check predicate */
3796 if (!csr_ops[csrno].predicate) {
3797 return RISCV_EXCP_ILLEGAL_INST;
3798 }
3799
3800 if (write_mask && read_only) {
3801 return RISCV_EXCP_ILLEGAL_INST;
3802 }
3803
3804 RISCVException ret = csr_ops[csrno].predicate(env, csrno);
3805 if (ret != RISCV_EXCP_NONE) {
3806 return ret;
3807 }
3808
3809 #if !defined(CONFIG_USER_ONLY)
3810 int csr_priv, effective_priv = env->priv;
3811
3812 if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
3813 !riscv_cpu_virt_enabled(env)) {
3814 /*
3815 * We are in HS mode. Add 1 to the effective privilege level to
3816 * allow us to access the Hypervisor CSRs.
3817 */
3818 effective_priv++;
3819 }
3820
3821 csr_priv = get_field(csrno, 0x300);
3822 if (!env->debugger && (effective_priv < csr_priv)) {
3823 if (csr_priv == (PRV_S + 1) && riscv_cpu_virt_enabled(env)) {
3824 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
3825 }
3826 return RISCV_EXCP_ILLEGAL_INST;
3827 }
3828 #endif
3829 return RISCV_EXCP_NONE;
3830 }
3831
3832 static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
3833 target_ulong *ret_value,
3834 target_ulong new_value,
3835 target_ulong write_mask)
3836 {
3837 RISCVException ret;
3838 target_ulong old_value;
3839
3840 /* execute combined read/write operation if it exists */
3841 if (csr_ops[csrno].op) {
3842 return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
3843 }
3844
3845 /* if no accessor exists then return failure */
3846 if (!csr_ops[csrno].read) {
3847 return RISCV_EXCP_ILLEGAL_INST;
3848 }
3849 /* read old value */
3850 ret = csr_ops[csrno].read(env, csrno, &old_value);
3851 if (ret != RISCV_EXCP_NONE) {
3852 return ret;
3853 }
3854
3855 /* write value if writable and write mask set, otherwise drop writes */
3856 if (write_mask) {
3857 new_value = (old_value & ~write_mask) | (new_value & write_mask);
3858 if (csr_ops[csrno].write) {
3859 ret = csr_ops[csrno].write(env, csrno, new_value);
3860 if (ret != RISCV_EXCP_NONE) {
3861 return ret;
3862 }
3863 }
3864 }
3865
3866 /* return old value */
3867 if (ret_value) {
3868 *ret_value = old_value;
3869 }
3870
3871 return RISCV_EXCP_NONE;
3872 }
3873
3874 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
3875 target_ulong *ret_value,
3876 target_ulong new_value, target_ulong write_mask)
3877 {
3878 RISCVCPU *cpu = env_archcpu(env);
3879
3880 RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu);
3881 if (ret != RISCV_EXCP_NONE) {
3882 return ret;
3883 }
3884
3885 return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
3886 }
3887
3888 static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
3889 Int128 *ret_value,
3890 Int128 new_value,
3891 Int128 write_mask)
3892 {
3893 RISCVException ret;
3894 Int128 old_value;
3895
3896 /* read old value */
3897 ret = csr_ops[csrno].read128(env, csrno, &old_value);
3898 if (ret != RISCV_EXCP_NONE) {
3899 return ret;
3900 }
3901
3902 /* write value if writable and write mask set, otherwise drop writes */
3903 if (int128_nz(write_mask)) {
3904 new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
3905 int128_and(new_value, write_mask));
3906 if (csr_ops[csrno].write128) {
3907 ret = csr_ops[csrno].write128(env, csrno, new_value);
3908 if (ret != RISCV_EXCP_NONE) {
3909 return ret;
3910 }
3911 } else if (csr_ops[csrno].write) {
3912 /* avoids having to write wrappers for all registers */
3913 ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
3914 if (ret != RISCV_EXCP_NONE) {
3915 return ret;
3916 }
3917 }
3918 }
3919
3920 /* return old value */
3921 if (ret_value) {
3922 *ret_value = old_value;
3923 }
3924
3925 return RISCV_EXCP_NONE;
3926 }
3927
3928 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
3929 Int128 *ret_value,
3930 Int128 new_value, Int128 write_mask)
3931 {
3932 RISCVException ret;
3933 RISCVCPU *cpu = env_archcpu(env);
3934
3935 ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu);
3936 if (ret != RISCV_EXCP_NONE) {
3937 return ret;
3938 }
3939
3940 if (csr_ops[csrno].read128) {
3941 return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
3942 }
3943
3944 /*
3945 * Fall back to the 64-bit version for now, if no 128-bit alternative is
3946 * defined.
3947 * Note that some CSRs don't need to extend to MXLEN (the upper 64 bits are
3948 * not significant); for those, this fallback handles the accesses correctly.
3949 */
3950 target_ulong old_value;
3951 ret = riscv_csrrw_do64(env, csrno, &old_value,
3952 int128_getlo(new_value),
3953 int128_getlo(write_mask));
3954 if (ret == RISCV_EXCP_NONE && ret_value) {
3955 *ret_value = int128_make64(old_value);
3956 }
3957 return ret;
3958 }
3959
3960 /*
3961 * Debugger support. If not in user mode, set env->debugger before the
3962 * riscv_csrrw call and clear it after the call.
3963 */
3964 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
3965 target_ulong *ret_value,
3966 target_ulong new_value,
3967 target_ulong write_mask)
3968 {
3969 RISCVException ret;
3970 #if !defined(CONFIG_USER_ONLY)
3971 env->debugger = true;
3972 #endif
3973 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
3974 #if !defined(CONFIG_USER_ONLY)
3975 env->debugger = false;
3976 #endif
3977 return ret;
3978 }
3979
3980 /* Control and Status Register function table */
3981 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
3982 /* User Floating-Point CSRs */
3983 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
3984 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
3985 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
3986 /* Vector CSRs */
3987 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
3988 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
3989 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
3990 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
3991 [CSR_VL] = { "vl", vs, read_vl },
3992 [CSR_VTYPE] = { "vtype", vs, read_vtype },
3993 [CSR_VLENB] = { "vlenb", vs, read_vlenb },
3994 /* User Timers and Counters */
3995 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
3996 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter },
3997 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh },
3998 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh },
3999
4000 /*
4001 * In privileged mode, the monitor will have to emulate TIME CSRs only if
4002 * rdtime callback is not provided by machine/platform emulation.
4003 */
4004 [CSR_TIME] = { "time", ctr, read_time },
4005 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
4006
4007 /* Crypto Extension */
4008 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },
4009
4010 #if !defined(CONFIG_USER_ONLY)
4011 /* Machine Timers and Counters */
4012 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
4013 write_mhpmcounter },
4014 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter,
4015 write_mhpmcounter },
4016 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh,
4017 write_mhpmcounterh },
4018 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh,
4019 write_mhpmcounterh },
4020
4021 /* Machine Information Registers */
4022 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
4023 [CSR_MARCHID] = { "marchid", any, read_marchid },
4024 [CSR_MIMPID] = { "mimpid", any, read_mimpid },
4025 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
4026
4027 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero,
4028 .min_priv_ver = PRIV_VERSION_1_12_0 },
4029 /* Machine Trap Setup */
4030 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus,
4031 NULL, read_mstatus_i128 },
4032 [CSR_MISA] = { "misa", any, read_misa, write_misa,
4033 NULL, read_misa_i128 },
4034 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg },
4035 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
4036 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
4037 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
4038 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren,
4039 write_mcounteren },
4040
4041 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush,
4042 write_mstatush },
4043
4044 /* Machine Trap Handling */
4045 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch,
4046 NULL, read_mscratch_i128, write_mscratch_i128 },
4047 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
4048 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
4049 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
4050 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
4051
4052 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
4053 [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
4054 [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
4055
4056 /* Machine-Level Interrupts (AIA) */
4057 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
4058 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
4059
4060 /* Virtual Interrupts for Supervisor Level (AIA) */
4061 [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore },
4062 [CSR_MVIP] = { "mvip", aia_any, read_zero, write_ignore },
4063
4064 /* Machine-Level High-Half CSRs (AIA) */
4065 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
4066 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
4067 [CSR_MVIENH] = { "mvienh", aia_any32, read_zero, write_ignore },
4068 [CSR_MVIPH] = { "mviph", aia_any32, read_zero, write_ignore },
4069 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
4070
4071 /* Execution environment configuration */
4072 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg,
4073 .min_priv_ver = PRIV_VERSION_1_12_0 },
4074 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh,
4075 .min_priv_ver = PRIV_VERSION_1_12_0 },
4076 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg,
4077 .min_priv_ver = PRIV_VERSION_1_12_0 },
4078 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg,
4079 .min_priv_ver = PRIV_VERSION_1_12_0 },
4080 [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
4081 .min_priv_ver = PRIV_VERSION_1_12_0 },
4082
4083 /* Smstateen extension CSRs */
4084 [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0,
4085 .min_priv_ver = PRIV_VERSION_1_12_0 },
4086 [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh,
4087 write_mstateen0h,
4088 .min_priv_ver = PRIV_VERSION_1_12_0 },
4089 [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen,
4090 write_mstateen_1_3,
4091 .min_priv_ver = PRIV_VERSION_1_12_0 },
4092 [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh,
4093 write_mstateenh_1_3,
4094 .min_priv_ver = PRIV_VERSION_1_12_0 },
4095 [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen,
4096 write_mstateen_1_3,
4097 .min_priv_ver = PRIV_VERSION_1_12_0 },
4098 [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh,
4099 write_mstateenh_1_3,
4100 .min_priv_ver = PRIV_VERSION_1_12_0 },
4101 [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen,
4102 write_mstateen_1_3,
4103 .min_priv_ver = PRIV_VERSION_1_12_0 },
4104 [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh,
4105 write_mstateenh_1_3,
4106 .min_priv_ver = PRIV_VERSION_1_12_0 },
4107 [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0,
4108 .min_priv_ver = PRIV_VERSION_1_12_0 },
4109 [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh,
4110 write_hstateen0h,
4111 .min_priv_ver = PRIV_VERSION_1_12_0 },
4112 [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen,
4113 write_hstateen_1_3,
4114 .min_priv_ver = PRIV_VERSION_1_12_0 },
4115 [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh,
4116 write_hstateenh_1_3,
4117 .min_priv_ver = PRIV_VERSION_1_12_0 },
4118 [CSR_HSTATEEN2] = { "hstateen2", hstateen, read_hstateen,
4119 write_hstateen_1_3,
4120 .min_priv_ver = PRIV_VERSION_1_12_0 },
4121 [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh,
4122 write_hstateenh_1_3,
4123 .min_priv_ver = PRIV_VERSION_1_12_0 },
4124 [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen,
4125 write_hstateen_1_3,
4126 .min_priv_ver = PRIV_VERSION_1_12_0 },
4127 [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh,
4128 write_hstateenh_1_3,
4129 .min_priv_ver = PRIV_VERSION_1_12_0 },
4130 [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0,
4131 .min_priv_ver = PRIV_VERSION_1_12_0 },
4132 [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen,
4133 write_sstateen_1_3,
4134 .min_priv_ver = PRIV_VERSION_1_12_0 },
4135 [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen,
4136 write_sstateen_1_3,
4137 .min_priv_ver = PRIV_VERSION_1_12_0 },
4138 [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen,
4139 write_sstateen_1_3,
4140 .min_priv_ver = PRIV_VERSION_1_12_0 },
4141
4142 /* Supervisor Trap Setup */
4143 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
4144 NULL, read_sstatus_i128 },
4145 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
4146 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
4147 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren,
4148 write_scounteren },
4149
4150 /* Supervisor Trap Handling */
4151 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch,
4152 NULL, read_sscratch_i128, write_sscratch_i128 },
4153 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
4154 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
4155 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
4156 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
4157 [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp,
4158 .min_priv_ver = PRIV_VERSION_1_12_0 },
4159 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph,
4160 .min_priv_ver = PRIV_VERSION_1_12_0 },
4161 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp,
4162 write_vstimecmp,
4163 .min_priv_ver = PRIV_VERSION_1_12_0 },
4164 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph,
4165 write_vstimecmph,
4166 .min_priv_ver = PRIV_VERSION_1_12_0 },
4167
4168 /* Supervisor Protection and Translation */
4169 [CSR_SATP] = { "satp", smode, read_satp, write_satp },
4170
4171 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
4172 [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
4173 [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
4174
4175 /* Supervisor-Level Interrupts (AIA) */
4176 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
4177 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
4178
4179 /* Supervisor-Level High-Half CSRs (AIA) */
4180 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
4181 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
4182
4183 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus,
4184 .min_priv_ver = PRIV_VERSION_1_12_0 },
4185 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg,
4186 .min_priv_ver = PRIV_VERSION_1_12_0 },
4187 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg,
4188 .min_priv_ver = PRIV_VERSION_1_12_0 },
4189 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip,
4190 .min_priv_ver = PRIV_VERSION_1_12_0 },
4191 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip,
4192 .min_priv_ver = PRIV_VERSION_1_12_0 },
4193 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie,
4194 .min_priv_ver = PRIV_VERSION_1_12_0 },
4195 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren,
4196 write_hcounteren,
4197 .min_priv_ver = PRIV_VERSION_1_12_0 },
4198 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie,
4199 .min_priv_ver = PRIV_VERSION_1_12_0 },
4200 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval,
4201 .min_priv_ver = PRIV_VERSION_1_12_0 },
4202 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst,
4203 .min_priv_ver = PRIV_VERSION_1_12_0 },
4204 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip,
4205 .min_priv_ver = PRIV_VERSION_1_12_0 },
4206 [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp,
4207 .min_priv_ver = PRIV_VERSION_1_12_0 },
4208 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta,
4209 write_htimedelta,
4210 .min_priv_ver = PRIV_VERSION_1_12_0 },
4211 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah,
4212 write_htimedeltah,
4213 .min_priv_ver = PRIV_VERSION_1_12_0 },
4214
4215 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus,
4216 write_vsstatus,
4217 .min_priv_ver = PRIV_VERSION_1_12_0 },
4218 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip,
4219 .min_priv_ver = PRIV_VERSION_1_12_0 },
4220 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie,
4221 .min_priv_ver = PRIV_VERSION_1_12_0 },
4222 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec,
4223 .min_priv_ver = PRIV_VERSION_1_12_0 },
4224 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch,
4225 write_vsscratch,
4226 .min_priv_ver = PRIV_VERSION_1_12_0 },
4227 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc,
4228 .min_priv_ver = PRIV_VERSION_1_12_0 },
4229 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause,
4230 .min_priv_ver = PRIV_VERSION_1_12_0 },
4231 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval,
4232 .min_priv_ver = PRIV_VERSION_1_12_0 },
4233 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
4234 .min_priv_ver = PRIV_VERSION_1_12_0 },
4235
4236 [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
4237 .min_priv_ver = PRIV_VERSION_1_12_0 },
4238 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
4239 .min_priv_ver = PRIV_VERSION_1_12_0 },
4240
4241 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
4242 [CSR_HVIEN] = { "hvien", aia_hmode, read_zero, write_ignore },
4243 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl,
4244 write_hvictl },
4245 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1,
4246 write_hviprio1 },
4247 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2,
4248 write_hviprio2 },
4249
4250 /*
4251 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
4252 */
4253 [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL,
4254 rmw_xiselect },
4255 [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
4256
4257 /* VS-Level Interrupts (H-extension with AIA) */
4258 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
4259 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
4260
4261 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
4262 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL,
4263 rmw_hidelegh },
4264 [CSR_HVIENH] = { "hvienh", aia_hmode32, read_zero,
4265 write_ignore },
4266 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
4267 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h,
4268 write_hviprio1h },
4269 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h,
4270 write_hviprio2h },
4271 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
4272 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
4273
4274 /* Physical Memory Protection */
4275 [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg,
4276 .min_priv_ver = PRIV_VERSION_1_11_0 },
4277 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
4278 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
4279 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
4280 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
4281 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
4282 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
4283 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
4284 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
4285 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
4286 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
4287 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
4288 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
4289 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
4290 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
4291 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
4292 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
4293 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
4294 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
4295 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
4296 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
4297
4298 /* Debug CSRs */
4299 [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
4300 [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
4301 [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
4302 [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
4303 [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
4304
4305 /* User Pointer Masking */
4306 [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
4307 [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask,
4308 write_upmmask },
4309 [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase,
4310 write_upmbase },
4311 /* Machine Pointer Masking */
4312 [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
4313 [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask,
4314 write_mpmmask },
4315 [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase,
4316 write_mpmbase },
4317 /* Supervisor Pointer Masking */
4318 [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
4319 [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask,
4320 write_spmmask },
4321 [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase,
4322 write_spmbase },
4323
4324 /* Performance Counters */
4325 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
4326 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
4327 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter },
4328 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter },
4329 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter },
4330 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter },
4331 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter },
4332 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter },
4333 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter },
4334 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter },
4335 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter },
4336 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter },
4337 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter },
4338 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter },
4339 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter },
4340 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter },
4341 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter },
4342 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter },
4343 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter },
4344 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter },
4345 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter },
4346 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter },
4347 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter },
4348 [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter },
4349 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter },
4350 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter },
4351 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter },
4352 [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter },
4353 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter },
4354
4355 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter,
4356 write_mhpmcounter },
4357 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter,
4358 write_mhpmcounter },
4359 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter,
4360 write_mhpmcounter },
4361 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter,
4362 write_mhpmcounter },
4363 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter,
4364 write_mhpmcounter },
4365 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter,
4366 write_mhpmcounter },
4367 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter,
4368 write_mhpmcounter },
4369 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter,
4370 write_mhpmcounter },
4371 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter,
4372 write_mhpmcounter },
4373 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter,
4374 write_mhpmcounter },
4375 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter,
4376 write_mhpmcounter },
4377 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter,
4378 write_mhpmcounter },
4379 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter,
4380 write_mhpmcounter },
4381 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter,
4382 write_mhpmcounter },
4383 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter,
4384 write_mhpmcounter },
4385 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter,
4386 write_mhpmcounter },
4387 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter,
4388 write_mhpmcounter },
4389 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter,
4390 write_mhpmcounter },
4391 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter,
4392 write_mhpmcounter },
4393 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter,
4394 write_mhpmcounter },
4395 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter,
4396 write_mhpmcounter },
4397 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter,
4398 write_mhpmcounter },
4399 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter,
4400 write_mhpmcounter },
4401 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter,
4402 write_mhpmcounter },
4403 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter,
4404 write_mhpmcounter },
4405 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter,
4406 write_mhpmcounter },
4407 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter,
4408 write_mhpmcounter },
4409 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter,
4410 write_mhpmcounter },
4411 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter,
4412 write_mhpmcounter },
4413
4414 [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
4415 write_mcountinhibit,
4416 .min_priv_ver = PRIV_VERSION_1_11_0 },
4417
4418 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
4419 write_mhpmevent },
4420 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
4421 write_mhpmevent },
4422 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent,
4423 write_mhpmevent },
4424 [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent,
4425 write_mhpmevent },
4426 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent,
4427 write_mhpmevent },
4428 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent,
4429 write_mhpmevent },
4430 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent,
4431 write_mhpmevent },
4432 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent,
4433 write_mhpmevent },
4434 [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent,
4435 write_mhpmevent },
4436 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent,
4437 write_mhpmevent },
4438 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent,
4439 write_mhpmevent },
4440 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent,
4441 write_mhpmevent },
4442 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent,
4443 write_mhpmevent },
4444 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent,
4445 write_mhpmevent },
4446 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent,
4447 write_mhpmevent },
4448 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent,
4449 write_mhpmevent },
4450 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent,
4451 write_mhpmevent },
4452 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent,
4453 write_mhpmevent },
4454 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent,
4455 write_mhpmevent },
4456 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent,
4457 write_mhpmevent },
4458 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent,
4459 write_mhpmevent },
4460 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent,
4461 write_mhpmevent },
4462 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent,
4463 write_mhpmevent },
4464 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent,
4465 write_mhpmevent },
4466 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent,
4467 write_mhpmevent },
4468 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent,
4469 write_mhpmevent },
4470 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent,
4471 write_mhpmevent },
4472 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent,
4473 write_mhpmevent },
4474 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
4475 write_mhpmevent },
4476
4477 [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf, read_mhpmeventh,
4478 write_mhpmeventh,
4479 .min_priv_ver = PRIV_VERSION_1_12_0 },
4480 [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf, read_mhpmeventh,
4481 write_mhpmeventh,
4482 .min_priv_ver = PRIV_VERSION_1_12_0 },
4483 [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf, read_mhpmeventh,
4484 write_mhpmeventh,
4485 .min_priv_ver = PRIV_VERSION_1_12_0 },
4486 [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf, read_mhpmeventh,
4487 write_mhpmeventh,
4488 .min_priv_ver = PRIV_VERSION_1_12_0 },
4489 [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf, read_mhpmeventh,
4490 write_mhpmeventh,
4491 .min_priv_ver = PRIV_VERSION_1_12_0 },
4492 [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf, read_mhpmeventh,
4493 write_mhpmeventh,
4494 .min_priv_ver = PRIV_VERSION_1_12_0 },
4495 [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf, read_mhpmeventh,
4496 write_mhpmeventh,
4497 .min_priv_ver = PRIV_VERSION_1_12_0 },
4498 [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf, read_mhpmeventh,
4499 write_mhpmeventh,
4500 .min_priv_ver = PRIV_VERSION_1_12_0 },
4501 [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf, read_mhpmeventh,
4502 write_mhpmeventh,
4503 .min_priv_ver = PRIV_VERSION_1_12_0 },
4504 [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf, read_mhpmeventh,
4505 write_mhpmeventh,
4506 .min_priv_ver = PRIV_VERSION_1_12_0 },
4507 [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf, read_mhpmeventh,
4508 write_mhpmeventh,
4509 .min_priv_ver = PRIV_VERSION_1_12_0 },
4510 [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf, read_mhpmeventh,
4511 write_mhpmeventh,
4512 .min_priv_ver = PRIV_VERSION_1_12_0 },
4513 [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf, read_mhpmeventh,
4514 write_mhpmeventh,
4515 .min_priv_ver = PRIV_VERSION_1_12_0 },
4516 [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf, read_mhpmeventh,
4517 write_mhpmeventh,
4518 .min_priv_ver = PRIV_VERSION_1_12_0 },
4519 [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf, read_mhpmeventh,
4520 write_mhpmeventh,
4521 .min_priv_ver = PRIV_VERSION_1_12_0 },
4522 [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf, read_mhpmeventh,
4523 write_mhpmeventh,
4524 .min_priv_ver = PRIV_VERSION_1_12_0 },
4525 [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf, read_mhpmeventh,
4526 write_mhpmeventh,
4527 .min_priv_ver = PRIV_VERSION_1_12_0 },
4528 [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf, read_mhpmeventh,
4529 write_mhpmeventh,
4530 .min_priv_ver = PRIV_VERSION_1_12_0 },
4531 [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf, read_mhpmeventh,
4532 write_mhpmeventh,
4533 .min_priv_ver = PRIV_VERSION_1_12_0 },
4534 [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf, read_mhpmeventh,
4535 write_mhpmeventh,
4536 .min_priv_ver = PRIV_VERSION_1_12_0 },
4537 [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf, read_mhpmeventh,
4538 write_mhpmeventh,
4539 .min_priv_ver = PRIV_VERSION_1_12_0 },
4540 [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf, read_mhpmeventh,
4541 write_mhpmeventh,
4542 .min_priv_ver = PRIV_VERSION_1_12_0 },
4543 [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf, read_mhpmeventh,
4544 write_mhpmeventh,
4545 .min_priv_ver = PRIV_VERSION_1_12_0 },
4546 [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf, read_mhpmeventh,
4547 write_mhpmeventh,
4548 .min_priv_ver = PRIV_VERSION_1_12_0 },
4549 [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf, read_mhpmeventh,
4550 write_mhpmeventh,
4551 .min_priv_ver = PRIV_VERSION_1_12_0 },
4552 [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf, read_mhpmeventh,
4553 write_mhpmeventh,
4554 .min_priv_ver = PRIV_VERSION_1_12_0 },
4555 [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf, read_mhpmeventh,
4556 write_mhpmeventh,
4557 .min_priv_ver = PRIV_VERSION_1_12_0 },
4558 [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf, read_mhpmeventh,
4559 write_mhpmeventh,
4560 .min_priv_ver = PRIV_VERSION_1_12_0 },
4561 [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf, read_mhpmeventh,
4562 write_mhpmeventh,
4563 .min_priv_ver = PRIV_VERSION_1_12_0 },
4564
4565 [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh },
4566 [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh },
4567 [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh },
4568 [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh },
4569 [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh },
4570 [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh },
4571 [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh },
4572 [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh },
4573 [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh },
4574 [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh },
4575 [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh },
4576 [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh },
4577 [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh },
4578 [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh },
4579 [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh },
4580 [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh },
4581 [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh },
4582 [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh },
4583 [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh },
4584 [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh },
4585 [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh },
4586 [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh },
4587 [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh },
4588 [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh },
4589 [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh },
4590 [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh },
4591 [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh },
4592 [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh },
4593 [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh },
4594
4595 [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh,
4596 write_mhpmcounterh },
4597 [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh,
4598 write_mhpmcounterh },
4599 [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh,
4600 write_mhpmcounterh },
4601 [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh,
4602 write_mhpmcounterh },
4603 [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh,
4604 write_mhpmcounterh },
4605 [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh,
4606 write_mhpmcounterh },
4607 [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh,
4608 write_mhpmcounterh },
4609 [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh,
4610 write_mhpmcounterh },
4611 [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh,
4612 write_mhpmcounterh },
4613 [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh,
4614 write_mhpmcounterh },
4615 [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh,
4616 write_mhpmcounterh },
4617 [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh,
4618 write_mhpmcounterh },
4619 [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh,
4620 write_mhpmcounterh },
4621 [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh,
4622 write_mhpmcounterh },
4623 [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh,
4624 write_mhpmcounterh },
4625 [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh,
4626 write_mhpmcounterh },
4627 [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh,
4628 write_mhpmcounterh },
4629 [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh,
4630 write_mhpmcounterh },
4631 [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh,
4632 write_mhpmcounterh },
4633 [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh,
4634 write_mhpmcounterh },
4635 [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh,
4636 write_mhpmcounterh },
4637 [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh,
4638 write_mhpmcounterh },
4639 [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh,
4640 write_mhpmcounterh },
4641 [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh,
4642 write_mhpmcounterh },
4643 [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh,
4644 write_mhpmcounterh },
4645 [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh,
4646 write_mhpmcounterh },
4647 [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh,
4648 write_mhpmcounterh },
4649 [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh,
4650 write_mhpmcounterh },
4651 [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh,
4652 write_mhpmcounterh },
4653 [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf,
4654 .min_priv_ver = PRIV_VERSION_1_12_0 },
4655
4656 #endif /* !CONFIG_USER_ONLY */
4657 };
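
For reference, a minimal sketch of how an entry in a table of this shape is typically consulted: the predicate gates access, a read-modify-write hook (the fifth positional field in the entries above, e.g. rmw_sie) handles the whole access when present, and otherwise the separate read and write callbacks are used. The helper name csr_access_sketch and the field names predicate/read/write/op are assumptions made for illustration only; QEMU's actual dispatch path lives elsewhere in this file.

/*
 * Illustrative sketch only, not part of QEMU: a simplified dispatcher
 * over a csr_ops-style table.  The field names (predicate/read/write/op)
 * and the helper name are assumptions made for this example.
 */
static RISCVException csr_access_sketch(CPURISCVState *env, int csrno,
                                        target_ulong *ret_value,
                                        target_ulong new_value,
                                        target_ulong write_mask)
{
    riscv_csr_operations *ops = &csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
    RISCVException ret;

    /* The predicate decides whether this CSR is accessible at all. */
    if (!ops->predicate) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
    ret = ops->predicate(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* A read-modify-write hook, when present, handles the whole access. */
    if (ops->op) {
        return ops->op(env, csrno, ret_value, new_value, write_mask);
    }

    /* Otherwise fall back to the separate read and write callbacks. */
    if (ops->read) {
        ret = ops->read(env, csrno, ret_value);
        if (ret != RISCV_EXCP_NONE) {
            return ret;
        }
    }
    if (write_mask && ops->write) {
        ret = ops->write(env, csrno, new_value);
    }
    return ret;
}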