1 /*
2 * RISC-V Control and Status Registers.
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "cpu.h"
24 #include "pmu.h"
25 #include "time_helper.h"
26 #include "qemu/main-loop.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/cpu-timers.h"
29 #include "qemu/guest-random.h"
30 #include "qapi/error.h"
31
32 /* CSR function table public API */
33 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
34 {
35 *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
36 }
37
38 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
39 {
40 csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
41 }
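
/*
 * Illustrative note, not part of the original file: the two helpers above
 * copy entries in and out of the global csr_ops[] table, indexed by the CSR
 * number masked to CSR_TABLE_SIZE. A board or accelerator could therefore
 * override the handlers for a single CSR; a hedged sketch (my_mscratch_write
 * is a hypothetical handler, not an existing QEMU function):
 *
 *     riscv_csr_operations ops;
 *     riscv_get_csr_ops(CSR_MSCRATCH, &ops);   // copy the current handlers
 *     ops.write = my_mscratch_write;           // hypothetical override
 *     riscv_set_csr_ops(CSR_MSCRATCH, &ops);   // install it
 */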
42
43 /* Predicates */
44 static RISCVException fs(CPURISCVState *env, int csrno)
45 {
46 #if !defined(CONFIG_USER_ONLY)
47 if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
48 !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
49 return RISCV_EXCP_ILLEGAL_INST;
50 }
51 #endif
52 return RISCV_EXCP_NONE;
53 }
54
55 static RISCVException vs(CPURISCVState *env, int csrno)
56 {
57 CPUState *cs = env_cpu(env);
58 RISCVCPU *cpu = RISCV_CPU(cs);
59
60 if (env->misa_ext & RVV ||
61 cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
62 #if !defined(CONFIG_USER_ONLY)
63 if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
64 return RISCV_EXCP_ILLEGAL_INST;
65 }
66 #endif
67 return RISCV_EXCP_NONE;
68 }
69 return RISCV_EXCP_ILLEGAL_INST;
70 }
71
72 static RISCVException ctr(CPURISCVState *env, int csrno)
73 {
74 #if !defined(CONFIG_USER_ONLY)
75 CPUState *cs = env_cpu(env);
76 RISCVCPU *cpu = RISCV_CPU(cs);
77 int ctr_index;
78 target_ulong ctr_mask;
79 int base_csrno = CSR_CYCLE;
80 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32;
81
82 if (rv32 && csrno >= CSR_CYCLEH) {
83 /* Offset for RV32 hpmcounternh counters */
84 base_csrno += 0x80;
85 }
86 ctr_index = csrno - base_csrno;
87 ctr_mask = BIT(ctr_index);
88
89 if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
90 (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
91 goto skip_ext_pmu_check;
92 }
93
94 if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
95 /* No counter is enabled in PMU or the counter is out of range */
96 return RISCV_EXCP_ILLEGAL_INST;
97 }
98
99 skip_ext_pmu_check:
100
101 if (((env->priv == PRV_S) && (!get_field(env->mcounteren, ctr_mask))) ||
102 ((env->priv == PRV_U) && (!get_field(env->scounteren, ctr_mask)))) {
103 return RISCV_EXCP_ILLEGAL_INST;
104 }
105
106 if (riscv_cpu_virt_enabled(env)) {
107 if (!get_field(env->hcounteren, ctr_mask) &&
108 get_field(env->mcounteren, ctr_mask)) {
109 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
110 }
111 }
112 #endif
113 return RISCV_EXCP_NONE;
114 }
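
/*
 * Worked example (illustrative, not from the original file): for an S-mode
 * read of hpmcounter3, ctr_index = CSR_HPMCOUNTER3 - CSR_CYCLE = 3 and
 * ctr_mask = BIT(3). The access succeeds only if the PMU implements
 * counter 3 (pmu_avail_ctrs bit 3) and mcounteren bit 3 is set; with
 * virtualization enabled, a clear hcounteren bit 3 while mcounteren bit 3
 * is set turns the failure into a virtual instruction fault instead.
 */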
115
116 static RISCVException ctr32(CPURISCVState *env, int csrno)
117 {
118 if (riscv_cpu_mxl(env) != MXL_RV32) {
119 return RISCV_EXCP_ILLEGAL_INST;
120 }
121
122 return ctr(env, csrno);
123 }
124
125 #if !defined(CONFIG_USER_ONLY)
126 static RISCVException mctr(CPURISCVState *env, int csrno)
127 {
128 CPUState *cs = env_cpu(env);
129 RISCVCPU *cpu = RISCV_CPU(cs);
130 int ctr_index;
131 int base_csrno = CSR_MHPMCOUNTER3;
132
133 if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
134 /* Offset for RV32 mhpmcounternh counters */
135 base_csrno += 0x80;
136 }
137 ctr_index = csrno - base_csrno;
138 if (!cpu->cfg.pmu_num || ctr_index >= cpu->cfg.pmu_num) {
139 /* The PMU is not enabled or the counter is out of range */
140 return RISCV_EXCP_ILLEGAL_INST;
141 }
142
143 return RISCV_EXCP_NONE;
144 }
145
146 static RISCVException mctr32(CPURISCVState *env, int csrno)
147 {
148 if (riscv_cpu_mxl(env) != MXL_RV32) {
149 return RISCV_EXCP_ILLEGAL_INST;
150 }
151
152 return mctr(env, csrno);
153 }
154
155 static RISCVException sscofpmf(CPURISCVState *env, int csrno)
156 {
157 CPUState *cs = env_cpu(env);
158 RISCVCPU *cpu = RISCV_CPU(cs);
159
160 if (!cpu->cfg.ext_sscofpmf) {
161 return RISCV_EXCP_ILLEGAL_INST;
162 }
163
164 return RISCV_EXCP_NONE;
165 }
166
167 static RISCVException any(CPURISCVState *env, int csrno)
168 {
169 return RISCV_EXCP_NONE;
170 }
171
172 static RISCVException any32(CPURISCVState *env, int csrno)
173 {
174 if (riscv_cpu_mxl(env) != MXL_RV32) {
175 return RISCV_EXCP_ILLEGAL_INST;
176 }
177
178 return any(env, csrno);
179
180 }
181
182 static int aia_any(CPURISCVState *env, int csrno)
183 {
184 RISCVCPU *cpu = env_archcpu(env);
185
186 if (!cpu->cfg.ext_smaia) {
187 return RISCV_EXCP_ILLEGAL_INST;
188 }
189
190 return any(env, csrno);
191 }
192
193 static int aia_any32(CPURISCVState *env, int csrno)
194 {
195 RISCVCPU *cpu = env_archcpu(env);
196
197 if (!cpu->cfg.ext_smaia) {
198 return RISCV_EXCP_ILLEGAL_INST;
199 }
200
201 return any32(env, csrno);
202 }
203
204 static RISCVException smode(CPURISCVState *env, int csrno)
205 {
206 if (riscv_has_ext(env, RVS)) {
207 return RISCV_EXCP_NONE;
208 }
209
210 return RISCV_EXCP_ILLEGAL_INST;
211 }
212
213 static int smode32(CPURISCVState *env, int csrno)
214 {
215 if (riscv_cpu_mxl(env) != MXL_RV32) {
216 return RISCV_EXCP_ILLEGAL_INST;
217 }
218
219 return smode(env, csrno);
220 }
221
222 static int aia_smode(CPURISCVState *env, int csrno)
223 {
224 RISCVCPU *cpu = env_archcpu(env);
225
226 if (!cpu->cfg.ext_ssaia) {
227 return RISCV_EXCP_ILLEGAL_INST;
228 }
229
230 return smode(env, csrno);
231 }
232
233 static int aia_smode32(CPURISCVState *env, int csrno)
234 {
235 RISCVCPU *cpu = env_archcpu(env);
236
237 if (!cpu->cfg.ext_ssaia) {
238 return RISCV_EXCP_ILLEGAL_INST;
239 }
240
241 return smode32(env, csrno);
242 }
243
244 static RISCVException hmode(CPURISCVState *env, int csrno)
245 {
246 if (riscv_has_ext(env, RVH)) {
247 return RISCV_EXCP_NONE;
248 }
249
250 return RISCV_EXCP_ILLEGAL_INST;
251 }
252
253 static RISCVException hmode32(CPURISCVState *env, int csrno)
254 {
255 if (riscv_cpu_mxl(env) != MXL_RV32) {
256 return RISCV_EXCP_ILLEGAL_INST;
257 }
258
259 return hmode(env, csrno);
260
261 }
262
263 static RISCVException umode(CPURISCVState *env, int csrno)
264 {
265 if (riscv_has_ext(env, RVU)) {
266 return RISCV_EXCP_NONE;
267 }
268
269 return RISCV_EXCP_ILLEGAL_INST;
270 }
271
272 static RISCVException umode32(CPURISCVState *env, int csrno)
273 {
274 if (riscv_cpu_mxl(env) != MXL_RV32) {
275 return RISCV_EXCP_ILLEGAL_INST;
276 }
277
278 return umode(env, csrno);
279 }
280
281 /* Checks if PointerMasking registers could be accessed */
282 static RISCVException pointer_masking(CPURISCVState *env, int csrno)
283 {
284 /* Check if j-ext is present */
285 if (riscv_has_ext(env, RVJ)) {
286 return RISCV_EXCP_NONE;
287 }
288 return RISCV_EXCP_ILLEGAL_INST;
289 }
290
291 static int aia_hmode(CPURISCVState *env, int csrno)
292 {
293 RISCVCPU *cpu = env_archcpu(env);
294
295 if (!cpu->cfg.ext_ssaia) {
296 return RISCV_EXCP_ILLEGAL_INST;
297 }
298
299 return hmode(env, csrno);
300 }
301
302 static int aia_hmode32(CPURISCVState *env, int csrno)
303 {
304 RISCVCPU *cpu = env_archcpu(env);
305
306 if (!cpu->cfg.ext_ssaia) {
307 return RISCV_EXCP_ILLEGAL_INST;
308 }
309
310 return hmode32(env, csrno);
311 }
312
313 static RISCVException pmp(CPURISCVState *env, int csrno)
314 {
315 if (riscv_feature(env, RISCV_FEATURE_PMP)) {
316 return RISCV_EXCP_NONE;
317 }
318
319 return RISCV_EXCP_ILLEGAL_INST;
320 }
321
322 static RISCVException epmp(CPURISCVState *env, int csrno)
323 {
324 if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) {
325 return RISCV_EXCP_NONE;
326 }
327
328 return RISCV_EXCP_ILLEGAL_INST;
329 }
330
331 static RISCVException debug(CPURISCVState *env, int csrno)
332 {
333 if (riscv_feature(env, RISCV_FEATURE_DEBUG)) {
334 return RISCV_EXCP_NONE;
335 }
336
337 return RISCV_EXCP_ILLEGAL_INST;
338 }
339 #endif
340
341 static RISCVException seed(CPURISCVState *env, int csrno)
342 {
343 RISCVCPU *cpu = env_archcpu(env);
344
345 if (!cpu->cfg.ext_zkr) {
346 return RISCV_EXCP_ILLEGAL_INST;
347 }
348
349 #if !defined(CONFIG_USER_ONLY)
350 /*
351 * With a CSR read-write instruction:
352 * 1) The seed CSR is always available in machine mode as normal.
353 * 2) Attempted access to seed from virtual modes VS and VU always raises
354 * an exception (virtual instruction exception only if mseccfg.sseed=1).
355 * 3) Without the corresponding access control bit set to 1, any attempted
356 * access to seed from U, S or HS modes will raise an illegal instruction
357 * exception.
358 */
359 if (env->priv == PRV_M) {
360 return RISCV_EXCP_NONE;
361 } else if (riscv_cpu_virt_enabled(env)) {
362 if (env->mseccfg & MSECCFG_SSEED) {
363 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
364 } else {
365 return RISCV_EXCP_ILLEGAL_INST;
366 }
367 } else {
368 if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
369 return RISCV_EXCP_NONE;
370 } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
371 return RISCV_EXCP_NONE;
372 } else {
373 return RISCV_EXCP_ILLEGAL_INST;
374 }
375 }
376 #else
377 return RISCV_EXCP_NONE;
378 #endif
379 }
380
381 /* User Floating-Point CSRs */
382 static RISCVException read_fflags(CPURISCVState *env, int csrno,
383 target_ulong *val)
384 {
385 *val = riscv_cpu_get_fflags(env);
386 return RISCV_EXCP_NONE;
387 }
388
389 static RISCVException write_fflags(CPURISCVState *env, int csrno,
390 target_ulong val)
391 {
392 #if !defined(CONFIG_USER_ONLY)
393 if (riscv_has_ext(env, RVF)) {
394 env->mstatus |= MSTATUS_FS;
395 }
396 #endif
397 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
398 return RISCV_EXCP_NONE;
399 }
400
401 static RISCVException read_frm(CPURISCVState *env, int csrno,
402 target_ulong *val)
403 {
404 *val = env->frm;
405 return RISCV_EXCP_NONE;
406 }
407
408 static RISCVException write_frm(CPURISCVState *env, int csrno,
409 target_ulong val)
410 {
411 #if !defined(CONFIG_USER_ONLY)
412 if (riscv_has_ext(env, RVF)) {
413 env->mstatus |= MSTATUS_FS;
414 }
415 #endif
416 env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
417 return RISCV_EXCP_NONE;
418 }
419
420 static RISCVException read_fcsr(CPURISCVState *env, int csrno,
421 target_ulong *val)
422 {
423 *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
424 | (env->frm << FSR_RD_SHIFT);
425 return RISCV_EXCP_NONE;
426 }
427
428 static RISCVException write_fcsr(CPURISCVState *env, int csrno,
429 target_ulong val)
430 {
431 #if !defined(CONFIG_USER_ONLY)
432 if (riscv_has_ext(env, RVF)) {
433 env->mstatus |= MSTATUS_FS;
434 }
435 #endif
436 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
437 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
438 return RISCV_EXCP_NONE;
439 }
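
/*
 * Layout reminder from the RISC-V F spec (summary, not original file text):
 * fcsr packs fflags in bits [4:0] and frm in bits [7:5], which is what the
 * FSR_AEXC/FSR_RD masks and shifts above encode. For example, writing
 * fcsr = 0xA5 leaves frm = 0x5 and fflags = 0x05.
 */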
440
441 static RISCVException read_vtype(CPURISCVState *env, int csrno,
442 target_ulong *val)
443 {
444 uint64_t vill;
445 switch (env->xl) {
446 case MXL_RV32:
447 vill = (uint32_t)env->vill << 31;
448 break;
449 case MXL_RV64:
450 vill = (uint64_t)env->vill << 63;
451 break;
452 default:
453 g_assert_not_reached();
454 }
455 *val = (target_ulong)vill | env->vtype;
456 return RISCV_EXCP_NONE;
457 }
458
459 static RISCVException read_vl(CPURISCVState *env, int csrno,
460 target_ulong *val)
461 {
462 *val = env->vl;
463 return RISCV_EXCP_NONE;
464 }
465
466 static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
467 {
468 *val = env_archcpu(env)->cfg.vlen >> 3;
469 return RISCV_EXCP_NONE;
470 }
471
472 static RISCVException read_vxrm(CPURISCVState *env, int csrno,
473 target_ulong *val)
474 {
475 *val = env->vxrm;
476 return RISCV_EXCP_NONE;
477 }
478
479 static RISCVException write_vxrm(CPURISCVState *env, int csrno,
480 target_ulong val)
481 {
482 #if !defined(CONFIG_USER_ONLY)
483 env->mstatus |= MSTATUS_VS;
484 #endif
485 env->vxrm = val;
486 return RISCV_EXCP_NONE;
487 }
488
489 static RISCVException read_vxsat(CPURISCVState *env, int csrno,
490 target_ulong *val)
491 {
492 *val = env->vxsat;
493 return RISCV_EXCP_NONE;
494 }
495
496 static RISCVException write_vxsat(CPURISCVState *env, int csrno,
497 target_ulong val)
498 {
499 #if !defined(CONFIG_USER_ONLY)
500 env->mstatus |= MSTATUS_VS;
501 #endif
502 env->vxsat = val;
503 return RISCV_EXCP_NONE;
504 }
505
506 static RISCVException read_vstart(CPURISCVState *env, int csrno,
507 target_ulong *val)
508 {
509 *val = env->vstart;
510 return RISCV_EXCP_NONE;
511 }
512
513 static RISCVException write_vstart(CPURISCVState *env, int csrno,
514 target_ulong val)
515 {
516 #if !defined(CONFIG_USER_ONLY)
517 env->mstatus |= MSTATUS_VS;
518 #endif
519 /*
520 * The vstart CSR is defined to have only enough writable bits
521 * to hold the largest element index, i.e. lg2(VLEN) bits.
522 */
523 env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
524 return RISCV_EXCP_NONE;
525 }
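
/*
 * Worked example (illustrative, not from the original file): with
 * cfg.vlen = 128, ctzl(128) = 7, so the mask above keeps bits [6:0] and
 * vstart can hold element indices 0..127, i.e. exactly lg2(VLEN) writable
 * bits.
 */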
526
527 static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
528 {
529 *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
530 return RISCV_EXCP_NONE;
531 }
532
533 static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
534 {
535 #if !defined(CONFIG_USER_ONLY)
536 env->mstatus |= MSTATUS_VS;
537 #endif
538 env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
539 env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
540 return RISCV_EXCP_NONE;
541 }
542
543 /* User Timers and Counters */
544 static target_ulong get_ticks(bool shift)
545 {
546 int64_t val;
547 target_ulong result;
548
549 #if !defined(CONFIG_USER_ONLY)
550 if (icount_enabled()) {
551 val = icount_get();
552 } else {
553 val = cpu_get_host_ticks();
554 }
555 #else
556 val = cpu_get_host_ticks();
557 #endif
558
559 if (shift) {
560 result = val >> 32;
561 } else {
562 result = val;
563 }
564
565 return result;
566 }
567
568 #if defined(CONFIG_USER_ONLY)
569 static RISCVException read_time(CPURISCVState *env, int csrno,
570 target_ulong *val)
571 {
572 *val = cpu_get_host_ticks();
573 return RISCV_EXCP_NONE;
574 }
575
576 static RISCVException read_timeh(CPURISCVState *env, int csrno,
577 target_ulong *val)
578 {
579 *val = cpu_get_host_ticks() >> 32;
580 return RISCV_EXCP_NONE;
581 }
582
583 static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
584 {
585 *val = get_ticks(false);
586 return RISCV_EXCP_NONE;
587 }
588
589 static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
590 {
591 *val = get_ticks(true);
592 return RISCV_EXCP_NONE;
593 }
594
595 #else /* CONFIG_USER_ONLY */
596
597 static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val)
598 {
599 int evt_index = csrno - CSR_MCOUNTINHIBIT;
600
601 *val = env->mhpmevent_val[evt_index];
602
603 return RISCV_EXCP_NONE;
604 }
605
606 static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val)
607 {
608 int evt_index = csrno - CSR_MCOUNTINHIBIT;
609 uint64_t mhpmevt_val = val;
610
611 env->mhpmevent_val[evt_index] = val;
612
613 if (riscv_cpu_mxl(env) == MXL_RV32) {
614 mhpmevt_val = mhpmevt_val |
615 ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
616 }
617 riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
618
619 return RISCV_EXCP_NONE;
620 }
621
622 static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val)
623 {
624 int evt_index = csrno - CSR_MHPMEVENT3H + 3;
625
626 *val = env->mhpmeventh_val[evt_index];
627
628 return RISCV_EXCP_NONE;
629 }
630
631 static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val)
632 {
633 int evt_index = csrno - CSR_MHPMEVENT3H + 3;
634 uint64_t mhpmevth_val = val;
635 uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
636
637 mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
638 env->mhpmeventh_val[evt_index] = val;
639
640 riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
641
642 return RISCV_EXCP_NONE;
643 }
644
645 static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
646 {
647 int ctr_idx = csrno - CSR_MCYCLE;
648 PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
649 uint64_t mhpmctr_val = val;
650
651 counter->mhpmcounter_val = val;
652 if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
653 riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
654 counter->mhpmcounter_prev = get_ticks(false);
655 if (ctr_idx > 2) {
656 if (riscv_cpu_mxl(env) == MXL_RV32) {
657 mhpmctr_val = mhpmctr_val |
658 ((uint64_t)counter->mhpmcounterh_val << 32);
659 }
660 riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
661 }
662 } else {
663 /* Other counters can keep incrementing from the given value */
664 counter->mhpmcounter_prev = val;
665 }
666
667 return RISCV_EXCP_NONE;
668 }
669
670 static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
671 {
672 int ctr_idx = csrno - CSR_MCYCLEH;
673 PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
674 uint64_t mhpmctr_val = counter->mhpmcounter_val;
675 uint64_t mhpmctrh_val = val;
676
677 counter->mhpmcounterh_val = val;
678 mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
679 if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
680 riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
681 counter->mhpmcounterh_prev = get_ticks(true);
682 if (ctr_idx > 2) {
683 riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
684 }
685 } else {
686 counter->mhpmcounterh_prev = val;
687 }
688
689 return RISCV_EXCP_NONE;
690 }
691
692 static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
693 bool upper_half, uint32_t ctr_idx)
694 {
695 PMUCTRState counter = env->pmu_ctrs[ctr_idx];
696 target_ulong ctr_prev = upper_half ? counter.mhpmcounterh_prev :
697 counter.mhpmcounter_prev;
698 target_ulong ctr_val = upper_half ? counter.mhpmcounterh_val :
699 counter.mhpmcounter_val;
700
701 if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
702 /**
703 * Counter should not increment if inhibit bit is set. We can't really
704 * stop the icount counting. Just return the counter value written by
705 * the supervisor to indicate that counter was not incremented.
706 */
707 if (!counter.started) {
708 *val = ctr_val;
709 return RISCV_EXCP_NONE;
710 } else {
711 /* Mark that the counter has been stopped */
712 counter.started = false;
713 }
714 }
715
716 /**
717 * The kernel computes the perf delta by subtracting the value it
718 * initialized previously (ctr_val) from the current counter value.
719 */
720 if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
721 riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
722 *val = get_ticks(upper_half) - ctr_prev + ctr_val;
723 } else {
724 *val = ctr_val;
725 }
726
727 return RISCV_EXCP_NONE;
728 }
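
/*
 * Worked example (illustrative, not from the original file): suppose the
 * guest wrote 1000 to the counter when get_ticks() returned 5000 (so
 * ctr_prev = 5000, ctr_val = 1000) and get_ticks() now returns 5300. The
 * read yields 5300 - 5000 + 1000 = 1300, i.e. the counter appears to have
 * advanced by exactly the 300 ticks that elapsed since the write.
 */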
729
730 static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
731 {
732 uint16_t ctr_index;
733
734 if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
735 ctr_index = csrno - CSR_MCYCLE;
736 } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
737 ctr_index = csrno - CSR_CYCLE;
738 } else {
739 return RISCV_EXCP_ILLEGAL_INST;
740 }
741
742 return riscv_pmu_read_ctr(env, val, false, ctr_index);
743 }
744
745 static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
746 {
747 uint16_t ctr_index;
748
749 if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
750 ctr_index = csrno - CSR_MCYCLEH;
751 } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
752 ctr_index = csrno - CSR_CYCLEH;
753 } else {
754 return RISCV_EXCP_ILLEGAL_INST;
755 }
756
757 return riscv_pmu_read_ctr(env, val, true, ctr_index);
758 }
759
760 static int read_scountovf(CPURISCVState *env, int csrno, target_ulong *val)
761 {
762 int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
763 int i;
764 *val = 0;
765 target_ulong *mhpm_evt_val;
766 uint64_t of_bit_mask;
767
768 if (riscv_cpu_mxl(env) == MXL_RV32) {
769 mhpm_evt_val = env->mhpmeventh_val;
770 of_bit_mask = MHPMEVENTH_BIT_OF;
771 } else {
772 mhpm_evt_val = env->mhpmevent_val;
773 of_bit_mask = MHPMEVENT_BIT_OF;
774 }
775
776 for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
777 if ((get_field(env->mcounteren, BIT(i))) &&
778 (mhpm_evt_val[i] & of_bit_mask)) {
779 *val |= BIT(i);
780 }
781 }
782
783 return RISCV_EXCP_NONE;
784 }
785
786 static RISCVException read_time(CPURISCVState *env, int csrno,
787 target_ulong *val)
788 {
789 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
790
791 if (!env->rdtime_fn) {
792 return RISCV_EXCP_ILLEGAL_INST;
793 }
794
795 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
796 return RISCV_EXCP_NONE;
797 }
798
799 static RISCVException read_timeh(CPURISCVState *env, int csrno,
800 target_ulong *val)
801 {
802 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
803
804 if (!env->rdtime_fn) {
805 return RISCV_EXCP_ILLEGAL_INST;
806 }
807
808 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
809 return RISCV_EXCP_NONE;
810 }
811
812 static RISCVException sstc(CPURISCVState *env, int csrno)
813 {
814 CPUState *cs = env_cpu(env);
815 RISCVCPU *cpu = RISCV_CPU(cs);
816 bool hmode_check = false;
817
818 if (!cpu->cfg.ext_sstc || !env->rdtime_fn) {
819 return RISCV_EXCP_ILLEGAL_INST;
820 }
821
822 if (env->priv == PRV_M) {
823 return RISCV_EXCP_NONE;
824 }
825
826 /*
827 * No need for a separate function for RV32, since env->menvcfg holds both
828 * menvcfg and menvcfgh on RV32.
829 */
830 if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
831 get_field(env->menvcfg, MENVCFG_STCE))) {
832 return RISCV_EXCP_ILLEGAL_INST;
833 }
834
835 if (riscv_cpu_virt_enabled(env)) {
836 if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
837 get_field(env->henvcfg, HENVCFG_STCE))) {
838 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
839 }
840 }
841
842 if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
843 hmode_check = true;
844 }
845
846 return hmode_check ? hmode(env, csrno) : smode(env, csrno);
847 }
848
849 static RISCVException sstc_32(CPURISCVState *env, int csrno)
850 {
851 if (riscv_cpu_mxl(env) != MXL_RV32) {
852 return RISCV_EXCP_ILLEGAL_INST;
853 }
854
855 return sstc(env, csrno);
856 }
857
858 static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
859 target_ulong *val)
860 {
861 *val = env->vstimecmp;
862
863 return RISCV_EXCP_NONE;
864 }
865
866 static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
867 target_ulong *val)
868 {
869 *val = env->vstimecmp >> 32;
870
871 return RISCV_EXCP_NONE;
872 }
873
874 static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
875 target_ulong val)
876 {
877 RISCVCPU *cpu = env_archcpu(env);
878
879 if (riscv_cpu_mxl(env) == MXL_RV32) {
880 env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
881 } else {
882 env->vstimecmp = val;
883 }
884
885 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
886 env->htimedelta, MIP_VSTIP);
887
888 return RISCV_EXCP_NONE;
889 }
890
891 static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
892 target_ulong val)
893 {
894 RISCVCPU *cpu = env_archcpu(env);
895
896 env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
897 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
898 env->htimedelta, MIP_VSTIP);
899
900 return RISCV_EXCP_NONE;
901 }
902
903 static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
904 target_ulong *val)
905 {
906 if (riscv_cpu_virt_enabled(env)) {
907 *val = env->vstimecmp;
908 } else {
909 *val = env->stimecmp;
910 }
911
912 return RISCV_EXCP_NONE;
913 }
914
915 static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
916 target_ulong *val)
917 {
918 if (riscv_cpu_virt_enabled(env)) {
919 *val = env->vstimecmp >> 32;
920 } else {
921 *val = env->stimecmp >> 32;
922 }
923
924 return RISCV_EXCP_NONE;
925 }
926
927 static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
928 target_ulong val)
929 {
930 RISCVCPU *cpu = env_archcpu(env);
931
932 if (riscv_cpu_virt_enabled(env)) {
933 return write_vstimecmp(env, csrno, val);
934 }
935
936 if (riscv_cpu_mxl(env) == MXL_RV32) {
937 env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
938 } else {
939 env->stimecmp = val;
940 }
941
942 riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP);
943
944 return RISCV_EXCP_NONE;
945 }
946
947 static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
948 target_ulong val)
949 {
950 RISCVCPU *cpu = env_archcpu(env);
951
952 if (riscv_cpu_virt_enabled(env)) {
953 return write_vstimecmph(env, csrno, val);
954 }
955
956 env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
957 riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP);
958
959 return RISCV_EXCP_NONE;
960 }
961
962 /* Machine constants */
963
964 #define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
965 #define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP | \
966 MIP_LCOFIP))
967 #define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
968 #define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
969
970 #define VSTOPI_NUM_SRCS 5
971
972 static const uint64_t delegable_ints = S_MODE_INTERRUPTS |
973 VS_MODE_INTERRUPTS;
974 static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS;
975 static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
976 HS_MODE_INTERRUPTS;
977 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
978 (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
979 (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
980 (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
981 (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
982 (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
983 (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
984 (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
985 (1ULL << (RISCV_EXCP_U_ECALL)) | \
986 (1ULL << (RISCV_EXCP_S_ECALL)) | \
987 (1ULL << (RISCV_EXCP_VS_ECALL)) | \
988 (1ULL << (RISCV_EXCP_M_ECALL)) | \
989 (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
990 (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
991 (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
992 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
993 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
994 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
995 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
996 static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
997 ~((1ULL << (RISCV_EXCP_S_ECALL)) |
998 (1ULL << (RISCV_EXCP_VS_ECALL)) |
999 (1ULL << (RISCV_EXCP_M_ECALL)) |
1000 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
1001 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
1002 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
1003 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
1004 static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
1005 SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
1006 SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
1007 static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP |
1008 SIP_LCOFIP;
1009 static const target_ulong hip_writable_mask = MIP_VSSIP;
1010 static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
1011 static const target_ulong vsip_writable_mask = MIP_VSSIP;
1012
1013 static const char valid_vm_1_10_32[16] = {
1014 [VM_1_10_MBARE] = 1,
1015 [VM_1_10_SV32] = 1
1016 };
1017
1018 static const char valid_vm_1_10_64[16] = {
1019 [VM_1_10_MBARE] = 1,
1020 [VM_1_10_SV39] = 1,
1021 [VM_1_10_SV48] = 1,
1022 [VM_1_10_SV57] = 1
1023 };
1024
1025 /* Machine Information Registers */
1026 static RISCVException read_zero(CPURISCVState *env, int csrno,
1027 target_ulong *val)
1028 {
1029 *val = 0;
1030 return RISCV_EXCP_NONE;
1031 }
1032
1033 static RISCVException write_ignore(CPURISCVState *env, int csrno,
1034 target_ulong val)
1035 {
1036 return RISCV_EXCP_NONE;
1037 }
1038
1039 static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
1040 target_ulong *val)
1041 {
1042 CPUState *cs = env_cpu(env);
1043 RISCVCPU *cpu = RISCV_CPU(cs);
1044
1045 *val = cpu->cfg.mvendorid;
1046 return RISCV_EXCP_NONE;
1047 }
1048
1049 static RISCVException read_marchid(CPURISCVState *env, int csrno,
1050 target_ulong *val)
1051 {
1052 CPUState *cs = env_cpu(env);
1053 RISCVCPU *cpu = RISCV_CPU(cs);
1054
1055 *val = cpu->cfg.marchid;
1056 return RISCV_EXCP_NONE;
1057 }
1058
1059 static RISCVException read_mimpid(CPURISCVState *env, int csrno,
1060 target_ulong *val)
1061 {
1062 CPUState *cs = env_cpu(env);
1063 RISCVCPU *cpu = RISCV_CPU(cs);
1064
1065 *val = cpu->cfg.mimpid;
1066 return RISCV_EXCP_NONE;
1067 }
1068
1069 static RISCVException read_mhartid(CPURISCVState *env, int csrno,
1070 target_ulong *val)
1071 {
1072 *val = env->mhartid;
1073 return RISCV_EXCP_NONE;
1074 }
1075
1076 /* Machine Trap Setup */
1077
1078 /* We do not store SD explicitly, only compute it on demand. */
1079 static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
1080 {
1081 if ((status & MSTATUS_FS) == MSTATUS_FS ||
1082 (status & MSTATUS_VS) == MSTATUS_VS ||
1083 (status & MSTATUS_XS) == MSTATUS_XS) {
1084 switch (xl) {
1085 case MXL_RV32:
1086 return status | MSTATUS32_SD;
1087 case MXL_RV64:
1088 return status | MSTATUS64_SD;
1089 case MXL_RV128:
1090 return MSTATUSH128_SD;
1091 default:
1092 g_assert_not_reached();
1093 }
1094 }
1095 return status;
1096 }
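
/*
 * Background from the RISC-V privileged spec (summary, not original file
 * text): SD is the most significant bit of the status register (bit 31 on
 * RV32, bit 63 on RV64) and summarizes whether any of FS, VS or XS is in the
 * Dirty state, so it is recomputed on every read instead of being stored.
 * In the dirty case for MXL_RV128, only the SD bit destined for the upper
 * half of the 128-bit value is returned.
 */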
1097
1098 static RISCVException read_mstatus(CPURISCVState *env, int csrno,
1099 target_ulong *val)
1100 {
1101 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
1102 return RISCV_EXCP_NONE;
1103 }
1104
1105 static int validate_vm(CPURISCVState *env, target_ulong vm)
1106 {
1107 if (riscv_cpu_mxl(env) == MXL_RV32) {
1108 return valid_vm_1_10_32[vm & 0xf];
1109 } else {
1110 return valid_vm_1_10_64[vm & 0xf];
1111 }
1112 }
1113
1114 static RISCVException write_mstatus(CPURISCVState *env, int csrno,
1115 target_ulong val)
1116 {
1117 uint64_t mstatus = env->mstatus;
1118 uint64_t mask = 0;
1119 RISCVMXL xl = riscv_cpu_mxl(env);
1120
1121 /* flush tlb on mstatus fields that affect VM */
1122 if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV |
1123 MSTATUS_MPRV | MSTATUS_SUM)) {
1124 tlb_flush(env_cpu(env));
1125 }
1126 mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
1127 MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
1128 MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
1129 MSTATUS_TW | MSTATUS_VS;
1130
1131 if (riscv_has_ext(env, RVF)) {
1132 mask |= MSTATUS_FS;
1133 }
1134
1135 if (xl != MXL_RV32 || env->debugger) {
1136 /*
1137 * RV32: MPV and GVA are not in mstatus. The current plan is to
1138 * add them to mstatush. For now, we just don't support it.
1139 */
1140 mask |= MSTATUS_MPV | MSTATUS_GVA;
1141 if ((val & MSTATUS64_UXL) != 0) {
1142 mask |= MSTATUS64_UXL;
1143 }
1144 }
1145
1146 mstatus = (mstatus & ~mask) | (val & mask);
1147
1148 if (xl > MXL_RV32) {
1149 /* SXL field is for now read only */
1150 mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
1151 }
1152 env->mstatus = mstatus;
1153 env->xl = cpu_recompute_xl(env);
1154
1155 return RISCV_EXCP_NONE;
1156 }
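
/*
 * Reading of the code above (illustrative note, not original file text):
 * UXL is only writable outside RV32 (or from the debugger), and only when
 * the written value is non-zero, so software cannot clear UXL to the
 * reserved encoding 0. SXL stays read-only because it is forced back to the
 * machine XLEN after the masked update.
 */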
1157
1158 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
1159 target_ulong *val)
1160 {
1161 *val = env->mstatus >> 32;
1162 return RISCV_EXCP_NONE;
1163 }
1164
1165 static RISCVException write_mstatush(CPURISCVState *env, int csrno,
1166 target_ulong val)
1167 {
1168 uint64_t valh = (uint64_t)val << 32;
1169 uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;
1170
1171 if ((valh ^ env->mstatus) & (MSTATUS_MPV)) {
1172 tlb_flush(env_cpu(env));
1173 }
1174
1175 env->mstatus = (env->mstatus & ~mask) | (valh & mask);
1176
1177 return RISCV_EXCP_NONE;
1178 }
1179
1180 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
1181 Int128 *val)
1182 {
1183 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus));
1184 return RISCV_EXCP_NONE;
1185 }
1186
1187 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
1188 Int128 *val)
1189 {
1190 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
1191 return RISCV_EXCP_NONE;
1192 }
1193
1194 static RISCVException read_misa(CPURISCVState *env, int csrno,
1195 target_ulong *val)
1196 {
1197 target_ulong misa;
1198
1199 switch (env->misa_mxl) {
1200 case MXL_RV32:
1201 misa = (target_ulong)MXL_RV32 << 30;
1202 break;
1203 #ifdef TARGET_RISCV64
1204 case MXL_RV64:
1205 misa = (target_ulong)MXL_RV64 << 62;
1206 break;
1207 #endif
1208 default:
1209 g_assert_not_reached();
1210 }
1211
1212 *val = misa | env->misa_ext;
1213 return RISCV_EXCP_NONE;
1214 }
1215
1216 static RISCVException write_misa(CPURISCVState *env, int csrno,
1217 target_ulong val)
1218 {
1219 if (!riscv_feature(env, RISCV_FEATURE_MISA)) {
1220 /* drop write to misa */
1221 return RISCV_EXCP_NONE;
1222 }
1223
1224 /* 'I' or 'E' must be present */
1225 if (!(val & (RVI | RVE))) {
1226 /* It is not, drop write to misa */
1227 return RISCV_EXCP_NONE;
1228 }
1229
1230 /* 'E' excludes all other extensions */
1231 if (val & RVE) {
1232 /* when we support 'E' we can do "val = RVE;" however
1233 * for now we just drop writes if 'E' is present.
1234 */
1235 return RISCV_EXCP_NONE;
1236 }
1237
1238 /*
1239 * misa.MXL writes are not supported by QEMU.
1240 * Drop writes to those bits.
1241 */
1242
1243 /* Mask extensions that are not supported by this hart */
1244 val &= env->misa_ext_mask;
1245
1246 /* Mask extensions that are not supported by QEMU */
1247 val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);
1248
1249 /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
1250 if ((val & RVD) && !(val & RVF)) {
1251 val &= ~RVD;
1252 }
1253
1254 /* Suppress 'C' if next instruction is not aligned
1255 * TODO: this should check next_pc
1256 */
1257 if ((val & RVC) && (GETPC() & ~3) != 0) {
1258 val &= ~RVC;
1259 }
1260
1261 /* If nothing changed, do nothing. */
1262 if (val == env->misa_ext) {
1263 return RISCV_EXCP_NONE;
1264 }
1265
1266 if (!(val & RVF)) {
1267 env->mstatus &= ~MSTATUS_FS;
1268 }
1269
1270 /* flush translation cache */
1271 tb_flush(env_cpu(env));
1272 env->misa_ext = val;
1273 env->xl = riscv_cpu_mxl(env);
1274 return RISCV_EXCP_NONE;
1275 }
1276
1277 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
1278 target_ulong *val)
1279 {
1280 *val = env->medeleg;
1281 return RISCV_EXCP_NONE;
1282 }
1283
1284 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
1285 target_ulong val)
1286 {
1287 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
1288 return RISCV_EXCP_NONE;
1289 }
1290
1291 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
1292 uint64_t *ret_val,
1293 uint64_t new_val, uint64_t wr_mask)
1294 {
1295 uint64_t mask = wr_mask & delegable_ints;
1296
1297 if (ret_val) {
1298 *ret_val = env->mideleg;
1299 }
1300
1301 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
1302
1303 if (riscv_has_ext(env, RVH)) {
1304 env->mideleg |= HS_MODE_INTERRUPTS;
1305 }
1306
1307 return RISCV_EXCP_NONE;
1308 }
1309
1310 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
1311 target_ulong *ret_val,
1312 target_ulong new_val, target_ulong wr_mask)
1313 {
1314 uint64_t rval;
1315 RISCVException ret;
1316
1317 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
1318 if (ret_val) {
1319 *ret_val = rval;
1320 }
1321
1322 return ret;
1323 }
1324
1325 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
1326 target_ulong *ret_val,
1327 target_ulong new_val,
1328 target_ulong wr_mask)
1329 {
1330 uint64_t rval;
1331 RISCVException ret;
1332
1333 ret = rmw_mideleg64(env, csrno, &rval,
1334 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1335 if (ret_val) {
1336 *ret_val = rval >> 32;
1337 }
1338
1339 return ret;
1340 }
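
/*
 * Pattern summary (illustrative note, not original file text): the *h
 * variants used on RV32 (rmw_midelegh, rmw_mieh, rmw_miph, ...) all follow
 * the shape above: shift the 32-bit new value and write mask into the upper
 * half of the 64-bit register, call the 64-bit helper, and return bits
 * [63:32] of the result as the high-word CSR value.
 */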
1341
1342 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
1343 uint64_t *ret_val,
1344 uint64_t new_val, uint64_t wr_mask)
1345 {
1346 uint64_t mask = wr_mask & all_ints;
1347
1348 if (ret_val) {
1349 *ret_val = env->mie;
1350 }
1351
1352 env->mie = (env->mie & ~mask) | (new_val & mask);
1353
1354 if (!riscv_has_ext(env, RVH)) {
1355 env->mie &= ~((uint64_t)MIP_SGEIP);
1356 }
1357
1358 return RISCV_EXCP_NONE;
1359 }
1360
1361 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
1362 target_ulong *ret_val,
1363 target_ulong new_val, target_ulong wr_mask)
1364 {
1365 uint64_t rval;
1366 RISCVException ret;
1367
1368 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
1369 if (ret_val) {
1370 *ret_val = rval;
1371 }
1372
1373 return ret;
1374 }
1375
1376 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
1377 target_ulong *ret_val,
1378 target_ulong new_val, target_ulong wr_mask)
1379 {
1380 uint64_t rval;
1381 RISCVException ret;
1382
1383 ret = rmw_mie64(env, csrno, &rval,
1384 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1385 if (ret_val) {
1386 *ret_val = rval >> 32;
1387 }
1388
1389 return ret;
1390 }
1391
1392 static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
1393 {
1394 int irq;
1395 uint8_t iprio;
1396
1397 irq = riscv_cpu_mirq_pending(env);
1398 if (irq <= 0 || irq > 63) {
1399 *val = 0;
1400 } else {
1401 iprio = env->miprio[irq];
1402 if (!iprio) {
1403 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
1404 iprio = IPRIO_MMAXIPRIO;
1405 }
1406 }
1407 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
1408 *val |= iprio;
1409 }
1410
1411 return RISCV_EXCP_NONE;
1412 }
1413
1414 static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
1415 {
1416 if (!riscv_cpu_virt_enabled(env)) {
1417 return csrno;
1418 }
1419
1420 switch (csrno) {
1421 case CSR_SISELECT:
1422 return CSR_VSISELECT;
1423 case CSR_SIREG:
1424 return CSR_VSIREG;
1425 case CSR_STOPEI:
1426 return CSR_VSTOPEI;
1427 default:
1428 return csrno;
1429 };
1430 }
1431
1432 static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val,
1433 target_ulong new_val, target_ulong wr_mask)
1434 {
1435 target_ulong *iselect;
1436
1437 /* Translate CSR number for VS-mode */
1438 csrno = aia_xlate_vs_csrno(env, csrno);
1439
1440 /* Find the iselect CSR based on CSR number */
1441 switch (csrno) {
1442 case CSR_MISELECT:
1443 iselect = &env->miselect;
1444 break;
1445 case CSR_SISELECT:
1446 iselect = &env->siselect;
1447 break;
1448 case CSR_VSISELECT:
1449 iselect = &env->vsiselect;
1450 break;
1451 default:
1452 return RISCV_EXCP_ILLEGAL_INST;
1453 };
1454
1455 if (val) {
1456 *val = *iselect;
1457 }
1458
1459 wr_mask &= ISELECT_MASK;
1460 if (wr_mask) {
1461 *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
1462 }
1463
1464 return RISCV_EXCP_NONE;
1465 }
1466
1467 static int rmw_iprio(target_ulong xlen,
1468 target_ulong iselect, uint8_t *iprio,
1469 target_ulong *val, target_ulong new_val,
1470 target_ulong wr_mask, int ext_irq_no)
1471 {
1472 int i, firq, nirqs;
1473 target_ulong old_val;
1474
1475 if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
1476 return -EINVAL;
1477 }
1478 if (xlen != 32 && iselect & 0x1) {
1479 return -EINVAL;
1480 }
1481
1482 nirqs = 4 * (xlen / 32);
1483 firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);
1484
1485 old_val = 0;
1486 for (i = 0; i < nirqs; i++) {
1487 old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
1488 }
1489
1490 if (val) {
1491 *val = old_val;
1492 }
1493
1494 if (wr_mask) {
1495 new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
1496 for (i = 0; i < nirqs; i++) {
1497 /*
1498 * The M-level and S-level external IRQ priority is always read-only
1499 * zero, so the default priority order is always used for M-level and
1500 * S-level external IRQs.
1501 */
1502 if ((firq + i) == ext_irq_no) {
1503 continue;
1504 }
1505 iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
1506 }
1507 }
1508
1509 return 0;
1510 }
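
/*
 * Worked example (illustrative, not from the original file): on RV64,
 * xlen = 64 gives nirqs = 4 * (64 / 32) = 8 interrupts per iprio register,
 * and iselect must be even. For iselect = ISELECT_IPRIO0 + 2,
 * firq = (2 / 2) * 8 = 8, so that register packs the 8-bit priorities of
 * IRQs 8..15, one byte per IRQ.
 */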
1511
1512 static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
1513 target_ulong new_val, target_ulong wr_mask)
1514 {
1515 bool virt;
1516 uint8_t *iprio;
1517 int ret = -EINVAL;
1518 target_ulong priv, isel, vgein;
1519
1520 /* Translate CSR number for VS-mode */
1521 csrno = aia_xlate_vs_csrno(env, csrno);
1522
1523 /* Decode register details from CSR number */
1524 virt = false;
1525 switch (csrno) {
1526 case CSR_MIREG:
1527 iprio = env->miprio;
1528 isel = env->miselect;
1529 priv = PRV_M;
1530 break;
1531 case CSR_SIREG:
1532 iprio = env->siprio;
1533 isel = env->siselect;
1534 priv = PRV_S;
1535 break;
1536 case CSR_VSIREG:
1537 iprio = env->hviprio;
1538 isel = env->vsiselect;
1539 priv = PRV_S;
1540 virt = true;
1541 break;
1542 default:
1543 goto done;
1544 };
1545
1546 /* Find the selected guest interrupt file */
1547 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1548
1549 if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
1550 /* Local interrupt priority registers not available for VS-mode */
1551 if (!virt) {
1552 ret = rmw_iprio(riscv_cpu_mxl_bits(env),
1553 isel, iprio, val, new_val, wr_mask,
1554 (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
1555 }
1556 } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
1557 /* IMSIC registers are only available when the machine implements one. */
1558 if (env->aia_ireg_rmw_fn[priv]) {
1559 /* Selected guest interrupt file should not be zero */
1560 if (virt && (!vgein || env->geilen < vgein)) {
1561 goto done;
1562 }
1563 /* Call machine specific IMSIC register emulation */
1564 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1565 AIA_MAKE_IREG(isel, priv, virt, vgein,
1566 riscv_cpu_mxl_bits(env)),
1567 val, new_val, wr_mask);
1568 }
1569 }
1570
1571 done:
1572 if (ret) {
1573 return (riscv_cpu_virt_enabled(env) && virt) ?
1574 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1575 }
1576 return RISCV_EXCP_NONE;
1577 }
1578
1579 static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
1580 target_ulong new_val, target_ulong wr_mask)
1581 {
1582 bool virt;
1583 int ret = -EINVAL;
1584 target_ulong priv, vgein;
1585
1586 /* Translate CSR number for VS-mode */
1587 csrno = aia_xlate_vs_csrno(env, csrno);
1588
1589 /* Decode register details from CSR number */
1590 virt = false;
1591 switch (csrno) {
1592 case CSR_MTOPEI:
1593 priv = PRV_M;
1594 break;
1595 case CSR_STOPEI:
1596 priv = PRV_S;
1597 break;
1598 case CSR_VSTOPEI:
1599 priv = PRV_S;
1600 virt = true;
1601 break;
1602 default:
1603 goto done;
1604 };
1605
1606 /* IMSIC CSRs are only available when the machine implements an IMSIC. */
1607 if (!env->aia_ireg_rmw_fn[priv]) {
1608 goto done;
1609 }
1610
1611 /* Find the selected guest interrupt file */
1612 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1613
1614 /* Selected guest interrupt file should be valid */
1615 if (virt && (!vgein || env->geilen < vgein)) {
1616 goto done;
1617 }
1618
1619 /* Call machine specific IMSIC register emulation for TOPEI */
1620 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1621 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
1622 riscv_cpu_mxl_bits(env)),
1623 val, new_val, wr_mask);
1624
1625 done:
1626 if (ret) {
1627 return (riscv_cpu_virt_enabled(env) && virt) ?
1628 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1629 }
1630 return RISCV_EXCP_NONE;
1631 }
1632
1633 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
1634 target_ulong *val)
1635 {
1636 *val = env->mtvec;
1637 return RISCV_EXCP_NONE;
1638 }
1639
1640 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
1641 target_ulong val)
1642 {
1643 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
1644 if ((val & 3) < 2) {
1645 env->mtvec = val;
1646 } else {
1647 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
1648 }
1649 return RISCV_EXCP_NONE;
1650 }
1651
1652 static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
1653 target_ulong *val)
1654 {
1655 *val = env->mcountinhibit;
1656 return RISCV_EXCP_NONE;
1657 }
1658
1659 static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
1660 target_ulong val)
1661 {
1662 int cidx;
1663 PMUCTRState *counter;
1664
1665 env->mcountinhibit = val;
1666
1667 /* Check if any other counter is also monitoring cycles/instructions */
1668 for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
1669 if (!get_field(env->mcountinhibit, BIT(cidx))) {
1670 counter = &env->pmu_ctrs[cidx];
1671 counter->started = true;
1672 }
1673 }
1674
1675 return RISCV_EXCP_NONE;
1676 }
1677
1678 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
1679 target_ulong *val)
1680 {
1681 *val = env->mcounteren;
1682 return RISCV_EXCP_NONE;
1683 }
1684
1685 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
1686 target_ulong val)
1687 {
1688 env->mcounteren = val;
1689 return RISCV_EXCP_NONE;
1690 }
1691
1692 /* Machine Trap Handling */
1693 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
1694 Int128 *val)
1695 {
1696 *val = int128_make128(env->mscratch, env->mscratchh);
1697 return RISCV_EXCP_NONE;
1698 }
1699
1700 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
1701 Int128 val)
1702 {
1703 env->mscratch = int128_getlo(val);
1704 env->mscratchh = int128_gethi(val);
1705 return RISCV_EXCP_NONE;
1706 }
1707
1708 static RISCVException read_mscratch(CPURISCVState *env, int csrno,
1709 target_ulong *val)
1710 {
1711 *val = env->mscratch;
1712 return RISCV_EXCP_NONE;
1713 }
1714
1715 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
1716 target_ulong val)
1717 {
1718 env->mscratch = val;
1719 return RISCV_EXCP_NONE;
1720 }
1721
1722 static RISCVException read_mepc(CPURISCVState *env, int csrno,
1723 target_ulong *val)
1724 {
1725 *val = env->mepc;
1726 return RISCV_EXCP_NONE;
1727 }
1728
1729 static RISCVException write_mepc(CPURISCVState *env, int csrno,
1730 target_ulong val)
1731 {
1732 env->mepc = val;
1733 return RISCV_EXCP_NONE;
1734 }
1735
1736 static RISCVException read_mcause(CPURISCVState *env, int csrno,
1737 target_ulong *val)
1738 {
1739 *val = env->mcause;
1740 return RISCV_EXCP_NONE;
1741 }
1742
1743 static RISCVException write_mcause(CPURISCVState *env, int csrno,
1744 target_ulong val)
1745 {
1746 env->mcause = val;
1747 return RISCV_EXCP_NONE;
1748 }
1749
1750 static RISCVException read_mtval(CPURISCVState *env, int csrno,
1751 target_ulong *val)
1752 {
1753 *val = env->mtval;
1754 return RISCV_EXCP_NONE;
1755 }
1756
1757 static RISCVException write_mtval(CPURISCVState *env, int csrno,
1758 target_ulong val)
1759 {
1760 env->mtval = val;
1761 return RISCV_EXCP_NONE;
1762 }
1763
1764 /* Execution environment configuration setup */
1765 static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
1766 target_ulong *val)
1767 {
1768 *val = env->menvcfg;
1769 return RISCV_EXCP_NONE;
1770 }
1771
1772 static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
1773 target_ulong val)
1774 {
1775 uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
1776
1777 if (riscv_cpu_mxl(env) == MXL_RV64) {
1778 mask |= MENVCFG_PBMTE | MENVCFG_STCE;
1779 }
1780 env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
1781
1782 return RISCV_EXCP_NONE;
1783 }
1784
1785 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
1786 target_ulong *val)
1787 {
1788 *val = env->menvcfg >> 32;
1789 return RISCV_EXCP_NONE;
1790 }
1791
1792 static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
1793 target_ulong val)
1794 {
1795 uint64_t mask = MENVCFG_PBMTE | MENVCFG_STCE;
1796 uint64_t valh = (uint64_t)val << 32;
1797
1798 env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
1799
1800 return RISCV_EXCP_NONE;
1801 }
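
/*
 * Reading of the masks above (illustrative note, not original file text):
 * on RV64 the whole menvcfg is a single 64-bit CSR, so write_menvcfg accepts
 * PBMTE and STCE directly; on RV32 those bits live in menvcfgh, which is why
 * the 32-bit write path only exposes them through this *h handler. The
 * henvcfg pair below follows the same split.
 */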
1802
1803 static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
1804 target_ulong *val)
1805 {
1806 *val = env->senvcfg;
1807 return RISCV_EXCP_NONE;
1808 }
1809
1810 static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
1811 target_ulong val)
1812 {
1813 uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
1814
1815 env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
1816
1817 return RISCV_EXCP_NONE;
1818 }
1819
1820 static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
1821 target_ulong *val)
1822 {
1823 *val = env->henvcfg;
1824 return RISCV_EXCP_NONE;
1825 }
1826
1827 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
1828 target_ulong val)
1829 {
1830 uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
1831
1832 if (riscv_cpu_mxl(env) == MXL_RV64) {
1833 mask |= HENVCFG_PBMTE | HENVCFG_STCE;
1834 }
1835
1836 env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
1837
1838 return RISCV_EXCP_NONE;
1839 }
1840
1841 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
1842 target_ulong *val)
1843 {
1844 *val = env->henvcfg >> 32;
1845 return RISCV_EXCP_NONE;
1846 }
1847
1848 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
1849 target_ulong val)
1850 {
1851 uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE;
1852 uint64_t valh = (uint64_t)val << 32;
1853
1854 env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);
1855
1856 return RISCV_EXCP_NONE;
1857 }
1858
1859 static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
1860 uint64_t *ret_val,
1861 uint64_t new_val, uint64_t wr_mask)
1862 {
1863 RISCVCPU *cpu = env_archcpu(env);
1864 uint64_t old_mip, mask = wr_mask & delegable_ints;
1865 uint32_t gin;
1866
1867 if (mask & MIP_SEIP) {
1868 env->software_seip = new_val & MIP_SEIP;
1869 new_val |= env->external_seip * MIP_SEIP;
1870 }
1871
1872 if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
1873 get_field(env->menvcfg, MENVCFG_STCE)) {
1874 /* The sstc extension forbids STIP & VSTIP from being writable in mip */
1875 mask = mask & ~(MIP_STIP | MIP_VSTIP);
1876 }
1877
1878 if (mask) {
1879 old_mip = riscv_cpu_update_mip(cpu, mask, (new_val & mask));
1880 } else {
1881 old_mip = env->mip;
1882 }
1883
1884 if (csrno != CSR_HVIP) {
1885 gin = get_field(env->hstatus, HSTATUS_VGEIN);
1886 old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
1887 old_mip |= env->vstime_irq ? MIP_VSTIP : 0;
1888 }
1889
1890 if (ret_val) {
1891 *ret_val = old_mip;
1892 }
1893
1894 return RISCV_EXCP_NONE;
1895 }
1896
1897 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
1898 target_ulong *ret_val,
1899 target_ulong new_val, target_ulong wr_mask)
1900 {
1901 uint64_t rval;
1902 RISCVException ret;
1903
1904 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
1905 if (ret_val) {
1906 *ret_val = rval;
1907 }
1908
1909 return ret;
1910 }
1911
1912 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
1913 target_ulong *ret_val,
1914 target_ulong new_val, target_ulong wr_mask)
1915 {
1916 uint64_t rval;
1917 RISCVException ret;
1918
1919 ret = rmw_mip64(env, csrno, &rval,
1920 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1921 if (ret_val) {
1922 *ret_val = rval >> 32;
1923 }
1924
1925 return ret;
1926 }
1927
1928 /* Supervisor Trap Setup */
1929 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
1930 Int128 *val)
1931 {
1932 uint64_t mask = sstatus_v1_10_mask;
1933 uint64_t sstatus = env->mstatus & mask;
1934 if (env->xl != MXL_RV32 || env->debugger) {
1935 mask |= SSTATUS64_UXL;
1936 }
1937
1938 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
1939 return RISCV_EXCP_NONE;
1940 }
1941
1942 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
1943 target_ulong *val)
1944 {
1945 target_ulong mask = (sstatus_v1_10_mask);
1946 if (env->xl != MXL_RV32 || env->debugger) {
1947 mask |= SSTATUS64_UXL;
1948 }
1949 /* TODO: Use SXL not MXL. */
1950 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
1951 return RISCV_EXCP_NONE;
1952 }
1953
1954 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
1955 target_ulong val)
1956 {
1957 target_ulong mask = (sstatus_v1_10_mask);
1958
1959 if (env->xl != MXL_RV32 || env->debugger) {
1960 if ((val & SSTATUS64_UXL) != 0) {
1961 mask |= SSTATUS64_UXL;
1962 }
1963 }
1964 target_ulong newval = (env->mstatus & ~mask) | (val & mask);
1965 return write_mstatus(env, CSR_MSTATUS, newval);
1966 }
1967
1968 static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
1969 uint64_t *ret_val,
1970 uint64_t new_val, uint64_t wr_mask)
1971 {
1972 RISCVException ret;
1973 uint64_t rval, vsbits, mask = env->hideleg & VS_MODE_INTERRUPTS;
1974
1975 /* Bring VS-level bits to correct position */
1976 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
1977 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
1978 new_val |= vsbits << 1;
1979 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
1980 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
1981 wr_mask |= vsbits << 1;
1982
1983 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask);
1984 if (ret_val) {
1985 rval &= mask;
1986 vsbits = rval & VS_MODE_INTERRUPTS;
1987 rval &= ~VS_MODE_INTERRUPTS;
1988 *ret_val = rval | (vsbits >> 1);
1989 }
1990
1991 return ret;
1992 }
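
/*
 * Background from the H-extension spec (summary, not original file text):
 * the VS-level interrupt bits (VSSIP/VSTIP/VSEIP) sit one position above
 * their S-level counterparts in mip/mie, but a guest reading vsie/vsip sees
 * them at the standard S-level positions. The shifts by 1 above translate
 * between the guest's view and the real bit positions around the call to
 * the 64-bit mie helper.
 */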
1993
1994 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
1995 target_ulong *ret_val,
1996 target_ulong new_val, target_ulong wr_mask)
1997 {
1998 uint64_t rval;
1999 RISCVException ret;
2000
2001 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
2002 if (ret_val) {
2003 *ret_val = rval;
2004 }
2005
2006 return ret;
2007 }
2008
2009 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
2010 target_ulong *ret_val,
2011 target_ulong new_val, target_ulong wr_mask)
2012 {
2013 uint64_t rval;
2014 RISCVException ret;
2015
2016 ret = rmw_vsie64(env, csrno, &rval,
2017 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2018 if (ret_val) {
2019 *ret_val = rval >> 32;
2020 }
2021
2022 return ret;
2023 }
2024
2025 static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
2026 uint64_t *ret_val,
2027 uint64_t new_val, uint64_t wr_mask)
2028 {
2029 RISCVException ret;
2030 uint64_t mask = env->mideleg & S_MODE_INTERRUPTS;
2031
2032 if (riscv_cpu_virt_enabled(env)) {
2033 if (env->hvictl & HVICTL_VTI) {
2034 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
2035 }
2036 ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
2037 } else {
2038 ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask);
2039 }
2040
2041 if (ret_val) {
2042 *ret_val &= mask;
2043 }
2044
2045 return ret;
2046 }
2047
2048 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
2049 target_ulong *ret_val,
2050 target_ulong new_val, target_ulong wr_mask)
2051 {
2052 uint64_t rval;
2053 RISCVException ret;
2054
2055 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
2056 if (ret == RISCV_EXCP_NONE && ret_val) {
2057 *ret_val = rval;
2058 }
2059
2060 return ret;
2061 }
2062
2063 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
2064 target_ulong *ret_val,
2065 target_ulong new_val, target_ulong wr_mask)
2066 {
2067 uint64_t rval;
2068 RISCVException ret;
2069
2070 ret = rmw_sie64(env, csrno, &rval,
2071 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2072 if (ret_val) {
2073 *ret_val = rval >> 32;
2074 }
2075
2076 return ret;
2077 }
2078
2079 static RISCVException read_stvec(CPURISCVState *env, int csrno,
2080 target_ulong *val)
2081 {
2082 *val = env->stvec;
2083 return RISCV_EXCP_NONE;
2084 }
2085
2086 static RISCVException write_stvec(CPURISCVState *env, int csrno,
2087 target_ulong val)
2088 {
2089 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
2090 if ((val & 3) < 2) {
2091 env->stvec = val;
2092 } else {
2093 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
2094 }
2095 return RISCV_EXCP_NONE;
2096 }
2097
2098 static RISCVException read_scounteren(CPURISCVState *env, int csrno,
2099 target_ulong *val)
2100 {
2101 *val = env->scounteren;
2102 return RISCV_EXCP_NONE;
2103 }
2104
2105 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
2106 target_ulong val)
2107 {
2108 env->scounteren = val;
2109 return RISCV_EXCP_NONE;
2110 }
2111
2112 /* Supervisor Trap Handling */
2113 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
2114 Int128 *val)
2115 {
2116 *val = int128_make128(env->sscratch, env->sscratchh);
2117 return RISCV_EXCP_NONE;
2118 }
2119
2120 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
2121 Int128 val)
2122 {
2123 env->sscratch = int128_getlo(val);
2124 env->sscratchh = int128_gethi(val);
2125 return RISCV_EXCP_NONE;
2126 }
2127
2128 static RISCVException read_sscratch(CPURISCVState *env, int csrno,
2129 target_ulong *val)
2130 {
2131 *val = env->sscratch;
2132 return RISCV_EXCP_NONE;
2133 }
2134
2135 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
2136 target_ulong val)
2137 {
2138 env->sscratch = val;
2139 return RISCV_EXCP_NONE;
2140 }
2141
2142 static RISCVException read_sepc(CPURISCVState *env, int csrno,
2143 target_ulong *val)
2144 {
2145 *val = env->sepc;
2146 return RISCV_EXCP_NONE;
2147 }
2148
2149 static RISCVException write_sepc(CPURISCVState *env, int csrno,
2150 target_ulong val)
2151 {
2152 env->sepc = val;
2153 return RISCV_EXCP_NONE;
2154 }
2155
2156 static RISCVException read_scause(CPURISCVState *env, int csrno,
2157 target_ulong *val)
2158 {
2159 *val = env->scause;
2160 return RISCV_EXCP_NONE;
2161 }
2162
2163 static RISCVException write_scause(CPURISCVState *env, int csrno,
2164 target_ulong val)
2165 {
2166 env->scause = val;
2167 return RISCV_EXCP_NONE;
2168 }
2169
2170 static RISCVException read_stval(CPURISCVState *env, int csrno,
2171 target_ulong *val)
2172 {
2173 *val = env->stval;
2174 return RISCV_EXCP_NONE;
2175 }
2176
2177 static RISCVException write_stval(CPURISCVState *env, int csrno,
2178 target_ulong val)
2179 {
2180 env->stval = val;
2181 return RISCV_EXCP_NONE;
2182 }
2183
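/*
 * In mip/hip the VS-level interrupts live at VSSIP/VSTIP/VSEIP (bits 2, 6
 * and 10), while a guest accessing vsip sees them at the S-level positions
 * SSIP/STIP/SEIP (bits 1, 5 and 9). For example, a guest write to SEIP
 * (bit 9) is shifted left by one to VSEIP (bit 10) before being handed to
 * rmw_mip64(), and the value read back is shifted right by one again.
 */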
2184 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
2185 uint64_t *ret_val,
2186 uint64_t new_val, uint64_t wr_mask)
2187 {
2188 RISCVException ret;
2189 uint64_t rval, vsbits, mask = env->hideleg & vsip_writable_mask;
2190
2191 /* Bring VS-level bits to correct position */
2192 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
2193 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
2194 new_val |= vsbits << 1;
2195 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
2196 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
2197 wr_mask |= vsbits << 1;
2198
2199 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask & mask);
2200 if (ret_val) {
2201 rval &= mask;
2202 vsbits = rval & VS_MODE_INTERRUPTS;
2203 rval &= ~VS_MODE_INTERRUPTS;
2204 *ret_val = rval | (vsbits >> 1);
2205 }
2206
2207 return ret;
2208 }
2209
2210 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
2211 target_ulong *ret_val,
2212 target_ulong new_val, target_ulong wr_mask)
2213 {
2214 uint64_t rval;
2215 RISCVException ret;
2216
2217 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
2218 if (ret_val) {
2219 *ret_val = rval;
2220 }
2221
2222 return ret;
2223 }
2224
2225 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
2226 target_ulong *ret_val,
2227 target_ulong new_val, target_ulong wr_mask)
2228 {
2229 uint64_t rval;
2230 RISCVException ret;
2231
2232 ret = rmw_vsip64(env, csrno, &rval,
2233 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2234 if (ret_val) {
2235 *ret_val = rval >> 32;
2236 }
2237
2238 return ret;
2239 }
2240
2241 static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
2242 uint64_t *ret_val,
2243 uint64_t new_val, uint64_t wr_mask)
2244 {
2245 RISCVException ret;
2246 uint64_t mask = env->mideleg & sip_writable_mask;
2247
2248 if (riscv_cpu_virt_enabled(env)) {
2249 if (env->hvictl & HVICTL_VTI) {
2250 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
2251 }
2252 ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
2253 } else {
2254 ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask);
2255 }
2256
2257 if (ret_val) {
2258 *ret_val &= env->mideleg & S_MODE_INTERRUPTS;
2259 }
2260
2261 return ret;
2262 }
2263
2264 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
2265 target_ulong *ret_val,
2266 target_ulong new_val, target_ulong wr_mask)
2267 {
2268 uint64_t rval;
2269 RISCVException ret;
2270
2271 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
2272 if (ret_val) {
2273 *ret_val = rval;
2274 }
2275
2276 return ret;
2277 }
2278
2279 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
2280 target_ulong *ret_val,
2281 target_ulong new_val, target_ulong wr_mask)
2282 {
2283 uint64_t rval;
2284 RISCVException ret;
2285
2286 ret = rmw_sip64(env, csrno, &rval,
2287 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2288 if (ret_val) {
2289 *ret_val = rval >> 32;
2290 }
2291
2292 return ret;
2293 }
2294
2295 /* Supervisor Protection and Translation */
2296 static RISCVException read_satp(CPURISCVState *env, int csrno,
2297 target_ulong *val)
2298 {
2299 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
2300 *val = 0;
2301 return RISCV_EXCP_NONE;
2302 }
2303
2304 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
2305 return RISCV_EXCP_ILLEGAL_INST;
2306 } else {
2307 *val = env->satp;
2308 }
2309
2310 return RISCV_EXCP_NONE;
2311 }
2312
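/*
 * Example of the fields checked below: on RV64 with Sv39, a write of
 * (8 << 60) | (asid << 44) | ppn sets SATP64_MODE = 8 (Sv39), SATP64_ASID
 * and SATP64_PPN; the Sv32 equivalent on RV32 is
 * (1 << 31) | (asid << 22) | ppn. validate_vm() rejects translation modes
 * the CPU does not support, and satp is only updated (with a TLB flush)
 * when validate_vm() accepts the requested mode and one of these fields
 * actually changes.
 */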
2313 static RISCVException write_satp(CPURISCVState *env, int csrno,
2314 target_ulong val)
2315 {
2316 target_ulong vm, mask;
2317
2318 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
2319 return RISCV_EXCP_NONE;
2320 }
2321
2322 if (riscv_cpu_mxl(env) == MXL_RV32) {
2323 vm = validate_vm(env, get_field(val, SATP32_MODE));
2324 mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
2325 } else {
2326 vm = validate_vm(env, get_field(val, SATP64_MODE));
2327 mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
2328 }
2329
2330 if (vm && mask) {
2331 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
2332 return RISCV_EXCP_ILLEGAL_INST;
2333 } else {
2334 /*
2335 * The ISA defines SATP.MODE=Bare as "no translation", but we still
2336 * pass these through QEMU's TLB emulation as it improves
2337 * performance. Flushing the TLB on SATP writes with paging
2338 * enabled avoids leaking those invalid cached mappings.
2339 */
2340 tlb_flush(env_cpu(env));
2341 env->satp = val;
2342 }
2343 }
2344 return RISCV_EXCP_NONE;
2345 }
2346
2347 static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val)
2348 {
2349 int irq, ret;
2350 target_ulong topei;
2351 uint64_t vseip, vsgein;
2352 uint32_t iid, iprio, hviid, hviprio, gein;
2353 uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];
2354
2355 gein = get_field(env->hstatus, HSTATUS_VGEIN);
2356 hviid = get_field(env->hvictl, HVICTL_IID);
2357 hviprio = get_field(env->hvictl, HVICTL_IPRIO);
2358
2359 if (gein) {
2360 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
2361 vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
2362 if (gein <= env->geilen && vseip) {
2363 siid[scount] = IRQ_S_EXT;
2364 siprio[scount] = IPRIO_MMAXIPRIO + 1;
2365 if (env->aia_ireg_rmw_fn[PRV_S]) {
2366 /*
2367 * Call machine specific IMSIC register emulation for
2368 * reading TOPEI.
2369 */
2370 ret = env->aia_ireg_rmw_fn[PRV_S](
2371 env->aia_ireg_rmw_fn_arg[PRV_S],
2372 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
2373 riscv_cpu_mxl_bits(env)),
2374 &topei, 0, 0);
2375 if (!ret && topei) {
2376 siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
2377 }
2378 }
2379 scount++;
2380 }
2381 } else {
2382 if (hviid == IRQ_S_EXT && hviprio) {
2383 siid[scount] = IRQ_S_EXT;
2384 siprio[scount] = hviprio;
2385 scount++;
2386 }
2387 }
2388
2389 if (env->hvictl & HVICTL_VTI) {
2390 if (hviid != IRQ_S_EXT) {
2391 siid[scount] = hviid;
2392 siprio[scount] = hviprio;
2393 scount++;
2394 }
2395 } else {
2396 irq = riscv_cpu_vsirq_pending(env);
2397 if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
2398 siid[scount] = irq;
2399 siprio[scount] = env->hviprio[irq];
2400 scount++;
2401 }
2402 }
2403
2404 iid = 0;
2405 iprio = UINT_MAX;
2406 for (s = 0; s < scount; s++) {
2407 if (siprio[s] < iprio) {
2408 iid = siid[s];
2409 iprio = siprio[s];
2410 }
2411 }
2412
2413 if (iid) {
2414 if (env->hvictl & HVICTL_IPRIOM) {
2415 if (iprio > IPRIO_MMAXIPRIO) {
2416 iprio = IPRIO_MMAXIPRIO;
2417 }
2418 if (!iprio) {
2419 if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
2420 iprio = IPRIO_MMAXIPRIO;
2421 }
2422 }
2423 } else {
2424 iprio = 1;
2425 }
2426 } else {
2427 iprio = 0;
2428 }
2429
2430 *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
2431 *val |= iprio;
2432 return RISCV_EXCP_NONE;
2433 }
2434
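/*
 * The returned value follows the AIA xTOPI layout used below: the interrupt
 * identity in the IID field and its priority in the low bits, i.e.
 * ((irq & TOPI_IID_MASK) << TOPI_IID_SHIFT) | iprio. For example, a pending
 * supervisor external interrupt (IRQ_S_EXT, i.e. 9) with priority 1 reads
 * back as (9 << TOPI_IID_SHIFT) | 1, and a value of 0 means nothing is
 * pending.
 */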
2435 static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val)
2436 {
2437 int irq;
2438 uint8_t iprio;
2439
2440 if (riscv_cpu_virt_enabled(env)) {
2441 return read_vstopi(env, CSR_VSTOPI, val);
2442 }
2443
2444 irq = riscv_cpu_sirq_pending(env);
2445 if (irq <= 0 || irq > 63) {
2446 *val = 0;
2447 } else {
2448 iprio = env->siprio[irq];
2449 if (!iprio) {
2450 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
2451 iprio = IPRIO_MMAXIPRIO;
2452 }
2453 }
2454 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
2455 *val |= iprio;
2456 }
2457
2458 return RISCV_EXCP_NONE;
2459 }
2460
2461 /* Hypervisor Extensions */
2462 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
2463 target_ulong *val)
2464 {
2465 *val = env->hstatus;
2466 if (riscv_cpu_mxl(env) != MXL_RV32) {
2467 /* We only support 64-bit VSXL */
2468 *val = set_field(*val, HSTATUS_VSXL, 2);
2469 }
2470 /* We only support little endian */
2471 *val = set_field(*val, HSTATUS_VSBE, 0);
2472 return RISCV_EXCP_NONE;
2473 }
2474
2475 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
2476 target_ulong val)
2477 {
2478 env->hstatus = val;
2479 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
2480 qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options.\n");
2481 }
2482 if (get_field(val, HSTATUS_VSBE) != 0) {
2483 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.\n");
2484 }
2485 return RISCV_EXCP_NONE;
2486 }
2487
2488 static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
2489 target_ulong *val)
2490 {
2491 *val = env->hedeleg;
2492 return RISCV_EXCP_NONE;
2493 }
2494
2495 static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
2496 target_ulong val)
2497 {
2498 env->hedeleg = val & vs_delegable_excps;
2499 return RISCV_EXCP_NONE;
2500 }
2501
2502 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
2503 uint64_t *ret_val,
2504 uint64_t new_val, uint64_t wr_mask)
2505 {
2506 uint64_t mask = wr_mask & vs_delegable_ints;
2507
2508 if (ret_val) {
2509 *ret_val = env->hideleg & vs_delegable_ints;
2510 }
2511
2512 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
2513 return RISCV_EXCP_NONE;
2514 }
2515
2516 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
2517 target_ulong *ret_val,
2518 target_ulong new_val, target_ulong wr_mask)
2519 {
2520 uint64_t rval;
2521 RISCVException ret;
2522
2523 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
2524 if (ret_val) {
2525 *ret_val = rval;
2526 }
2527
2528 return ret;
2529 }
2530
2531 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
2532 target_ulong *ret_val,
2533 target_ulong new_val, target_ulong wr_mask)
2534 {
2535 uint64_t rval;
2536 RISCVException ret;
2537
2538 ret = rmw_hideleg64(env, csrno, &rval,
2539 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2540 if (ret_val) {
2541 *ret_val = rval >> 32;
2542 }
2543
2544 return ret;
2545 }
2546
2547 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
2548 uint64_t *ret_val,
2549 uint64_t new_val, uint64_t wr_mask)
2550 {
2551 RISCVException ret;
2552
2553 ret = rmw_mip64(env, csrno, ret_val, new_val,
2554 wr_mask & hvip_writable_mask);
2555 if (ret_val) {
2556 *ret_val &= VS_MODE_INTERRUPTS;
2557 }
2558
2559 return ret;
2560 }
2561
2562 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
2563 target_ulong *ret_val,
2564 target_ulong new_val, target_ulong wr_mask)
2565 {
2566 uint64_t rval;
2567 RISCVException ret;
2568
2569 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
2570 if (ret_val) {
2571 *ret_val = rval;
2572 }
2573
2574 return ret;
2575 }
2576
2577 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
2578 target_ulong *ret_val,
2579 target_ulong new_val, target_ulong wr_mask)
2580 {
2581 uint64_t rval;
2582 RISCVException ret;
2583
2584 ret = rmw_hvip64(env, csrno, &rval,
2585 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2586 if (ret_val) {
2587 *ret_val = rval >> 32;
2588 }
2589
2590 return ret;
2591 }
2592
2593 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
2594 target_ulong *ret_value,
2595 target_ulong new_value, target_ulong write_mask)
2596 {
2597 int ret = rmw_mip(env, csrno, ret_value, new_value,
2598 write_mask & hip_writable_mask);
2599
2600 if (ret_value) {
2601 *ret_value &= HS_MODE_INTERRUPTS;
2602 }
2603 return ret;
2604 }
2605
2606 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
2607 target_ulong *ret_val,
2608 target_ulong new_val, target_ulong wr_mask)
2609 {
2610 uint64_t rval;
2611 RISCVException ret;
2612
2613 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
2614 if (ret_val) {
2615 *ret_val = rval & HS_MODE_INTERRUPTS;
2616 }
2617
2618 return ret;
2619 }
2620
2621 static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
2622 target_ulong *val)
2623 {
2624 *val = env->hcounteren;
2625 return RISCV_EXCP_NONE;
2626 }
2627
2628 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
2629 target_ulong val)
2630 {
2631 env->hcounteren = val;
2632 return RISCV_EXCP_NONE;
2633 }
2634
2635 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
2636 target_ulong *val)
2637 {
2638 if (val) {
2639 *val = env->hgeie;
2640 }
2641 return RISCV_EXCP_NONE;
2642 }
2643
2644 static RISCVException write_hgeie(CPURISCVState *env, int csrno,
2645 target_ulong val)
2646 {
2647 /* Only bits [GEILEN:1] are implemented; bit 0 is hardwired to zero */
2648 val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
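/* e.g. with geilen == 2 the writable mask is 0x6, i.e. bits 2:1 only */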
2649 env->hgeie = val;
2650 /* Update mip.SGEIP bit */
2651 riscv_cpu_update_mip(env_archcpu(env), MIP_SGEIP,
2652 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
2653 return RISCV_EXCP_NONE;
2654 }
2655
2656 static RISCVException read_htval(CPURISCVState *env, int csrno,
2657 target_ulong *val)
2658 {
2659 *val = env->htval;
2660 return RISCV_EXCP_NONE;
2661 }
2662
2663 static RISCVException write_htval(CPURISCVState *env, int csrno,
2664 target_ulong val)
2665 {
2666 env->htval = val;
2667 return RISCV_EXCP_NONE;
2668 }
2669
2670 static RISCVException read_htinst(CPURISCVState *env, int csrno,
2671 target_ulong *val)
2672 {
2673 *val = env->htinst;
2674 return RISCV_EXCP_NONE;
2675 }
2676
2677 static RISCVException write_htinst(CPURISCVState *env, int csrno,
2678 target_ulong val)
2679 {
2680 return RISCV_EXCP_NONE;
2681 }
2682
2683 static RISCVException read_hgeip(CPURISCVState *env, int csrno,
2684 target_ulong *val)
2685 {
2686 if (val) {
2687 *val = env->hgeip;
2688 }
2689 return RISCV_EXCP_NONE;
2690 }
2691
2692 static RISCVException read_hgatp(CPURISCVState *env, int csrno,
2693 target_ulong *val)
2694 {
2695 *val = env->hgatp;
2696 return RISCV_EXCP_NONE;
2697 }
2698
2699 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
2700 target_ulong val)
2701 {
2702 env->hgatp = val;
2703 return RISCV_EXCP_NONE;
2704 }
2705
2706 static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
2707 target_ulong *val)
2708 {
2709 if (!env->rdtime_fn) {
2710 return RISCV_EXCP_ILLEGAL_INST;
2711 }
2712
2713 *val = env->htimedelta;
2714 return RISCV_EXCP_NONE;
2715 }
2716
2717 static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
2718 target_ulong val)
2719 {
2720 if (!env->rdtime_fn) {
2721 return RISCV_EXCP_ILLEGAL_INST;
2722 }
2723
2724 if (riscv_cpu_mxl(env) == MXL_RV32) {
2725 env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
2726 } else {
2727 env->htimedelta = val;
2728 }
2729 return RISCV_EXCP_NONE;
2730 }
2731
2732 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
2733 target_ulong *val)
2734 {
2735 if (!env->rdtime_fn) {
2736 return RISCV_EXCP_ILLEGAL_INST;
2737 }
2738
2739 *val = env->htimedelta >> 32;
2740 return RISCV_EXCP_NONE;
2741 }
2742
2743 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
2744 target_ulong val)
2745 {
2746 if (!env->rdtime_fn) {
2747 return RISCV_EXCP_ILLEGAL_INST;
2748 }
2749
2750 env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
2751 return RISCV_EXCP_NONE;
2752 }
2753
2754 static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val)
2755 {
2756 *val = env->hvictl;
2757 return RISCV_EXCP_NONE;
2758 }
2759
2760 static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val)
2761 {
2762 env->hvictl = val & HVICTL_VALID_MASK;
2763 return RISCV_EXCP_NONE;
2764 }
2765
2766 static int read_hvipriox(CPURISCVState *env, int first_index,
2767 uint8_t *iprio, target_ulong *val)
2768 {
2769 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
2770
2771 /* First index has to be a multiple of the number of irqs per register */
2772 if (first_index % num_irqs) {
2773 return (riscv_cpu_virt_enabled(env)) ?
2774 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2775 }
2776
2777 /* Fill up the return value */
2778 *val = 0;
2779 for (i = 0; i < num_irqs; i++) {
2780 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
2781 continue;
2782 }
2783 if (rdzero) {
2784 continue;
2785 }
2786 *val |= ((target_ulong)iprio[irq]) << (i * 8);
2787 }
2788
2789 return RISCV_EXCP_NONE;
2790 }
2791
2792 static int write_hvipriox(CPURISCVState *env, int first_index,
2793 uint8_t *iprio, target_ulong val)
2794 {
2795 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
2796
2797 /* First index has to be a multiple of the number of irqs per register */
2798 if (first_index % num_irqs) {
2799 return (riscv_cpu_virt_enabled(env)) ?
2800 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2801 }
2802
2803 /* Fill up the priority array */
2804 for (i = 0; i < num_irqs; i++) {
2805 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
2806 continue;
2807 }
2808 if (rdzero) {
2809 iprio[irq] = 0;
2810 } else {
2811 iprio[irq] = (val >> (i * 8)) & 0xff;
2812 }
2813 }
2814
2815 return RISCV_EXCP_NONE;
2816 }
2817
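/*
 * Each hviprioX register packs one byte of priority per interrupt source,
 * num_irqs sources per register: 4 on RV32 and 8 on RV64. The accessors
 * below therefore start at first_index 0 (hviprio1), 4 (hviprio1h),
 * 8 (hviprio2) and 12 (hviprio2h); the *h halves are only exposed on RV32,
 * where each register covers four entries.
 */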
2818 static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val)
2819 {
2820 return read_hvipriox(env, 0, env->hviprio, val);
2821 }
2822
2823 static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val)
2824 {
2825 return write_hvipriox(env, 0, env->hviprio, val);
2826 }
2827
2828 static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val)
2829 {
2830 return read_hvipriox(env, 4, env->hviprio, val);
2831 }
2832
2833 static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val)
2834 {
2835 return write_hvipriox(env, 4, env->hviprio, val);
2836 }
2837
2838 static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val)
2839 {
2840 return read_hvipriox(env, 8, env->hviprio, val);
2841 }
2842
2843 static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val)
2844 {
2845 return write_hvipriox(env, 8, env->hviprio, val);
2846 }
2847
2848 static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val)
2849 {
2850 return read_hvipriox(env, 12, env->hviprio, val);
2851 }
2852
2853 static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val)
2854 {
2855 return write_hvipriox(env, 12, env->hviprio, val);
2856 }
2857
2858 /* Virtual CSR Registers */
2859 static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
2860 target_ulong *val)
2861 {
2862 *val = env->vsstatus;
2863 return RISCV_EXCP_NONE;
2864 }
2865
2866 static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
2867 target_ulong val)
2868 {
2869 uint64_t mask = (target_ulong)-1;
2870 if ((val & VSSTATUS64_UXL) == 0) {
2871 mask &= ~VSSTATUS64_UXL;
2872 }
2873 env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
2874 return RISCV_EXCP_NONE;
2875 }
2876
2877 static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
2878 {
2879 *val = env->vstvec;
2880 return RISCV_EXCP_NONE;
2881 }
2882
2883 static RISCVException write_vstvec(CPURISCVState *env, int csrno,
2884 target_ulong val)
2885 {
2886 env->vstvec = val;
2887 return RISCV_EXCP_NONE;
2888 }
2889
2890 static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
2891 target_ulong *val)
2892 {
2893 *val = env->vsscratch;
2894 return RISCV_EXCP_NONE;
2895 }
2896
2897 static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
2898 target_ulong val)
2899 {
2900 env->vsscratch = val;
2901 return RISCV_EXCP_NONE;
2902 }
2903
2904 static RISCVException read_vsepc(CPURISCVState *env, int csrno,
2905 target_ulong *val)
2906 {
2907 *val = env->vsepc;
2908 return RISCV_EXCP_NONE;
2909 }
2910
2911 static RISCVException write_vsepc(CPURISCVState *env, int csrno,
2912 target_ulong val)
2913 {
2914 env->vsepc = val;
2915 return RISCV_EXCP_NONE;
2916 }
2917
2918 static RISCVException read_vscause(CPURISCVState *env, int csrno,
2919 target_ulong *val)
2920 {
2921 *val = env->vscause;
2922 return RISCV_EXCP_NONE;
2923 }
2924
2925 static RISCVException write_vscause(CPURISCVState *env, int csrno,
2926 target_ulong val)
2927 {
2928 env->vscause = val;
2929 return RISCV_EXCP_NONE;
2930 }
2931
2932 static RISCVException read_vstval(CPURISCVState *env, int csrno,
2933 target_ulong *val)
2934 {
2935 *val = env->vstval;
2936 return RISCV_EXCP_NONE;
2937 }
2938
2939 static RISCVException write_vstval(CPURISCVState *env, int csrno,
2940 target_ulong val)
2941 {
2942 env->vstval = val;
2943 return RISCV_EXCP_NONE;
2944 }
2945
2946 static RISCVException read_vsatp(CPURISCVState *env, int csrno,
2947 target_ulong *val)
2948 {
2949 *val = env->vsatp;
2950 return RISCV_EXCP_NONE;
2951 }
2952
2953 static RISCVException write_vsatp(CPURISCVState *env, int csrno,
2954 target_ulong val)
2955 {
2956 env->vsatp = val;
2957 return RISCV_EXCP_NONE;
2958 }
2959
2960 static RISCVException read_mtval2(CPURISCVState *env, int csrno,
2961 target_ulong *val)
2962 {
2963 *val = env->mtval2;
2964 return RISCV_EXCP_NONE;
2965 }
2966
2967 static RISCVException write_mtval2(CPURISCVState *env, int csrno,
2968 target_ulong val)
2969 {
2970 env->mtval2 = val;
2971 return RISCV_EXCP_NONE;
2972 }
2973
2974 static RISCVException read_mtinst(CPURISCVState *env, int csrno,
2975 target_ulong *val)
2976 {
2977 *val = env->mtinst;
2978 return RISCV_EXCP_NONE;
2979 }
2980
2981 static RISCVException write_mtinst(CPURISCVState *env, int csrno,
2982 target_ulong val)
2983 {
2984 env->mtinst = val;
2985 return RISCV_EXCP_NONE;
2986 }
2987
2988 /* Physical Memory Protection */
2989 static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
2990 target_ulong *val)
2991 {
2992 *val = mseccfg_csr_read(env);
2993 return RISCV_EXCP_NONE;
2994 }
2995
2996 static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
2997 target_ulong val)
2998 {
2999 mseccfg_csr_write(env, val);
3000 return RISCV_EXCP_NONE;
3001 }
3002
3003 static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index)
3004 {
3005 /* TODO: RV128 restriction check */
3006 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
3007 return false;
3008 }
3009 return true;
3010 }
3011
3012 static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
3013 target_ulong *val)
3014 {
3015 uint32_t reg_index = csrno - CSR_PMPCFG0;
3016
3017 if (!check_pmp_reg_index(env, reg_index)) {
3018 return RISCV_EXCP_ILLEGAL_INST;
3019 }
3020 *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
3021 return RISCV_EXCP_NONE;
3022 }
3023
3024 static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
3025 target_ulong val)
3026 {
3027 uint32_t reg_index = csrno - CSR_PMPCFG0;
3028
3029 if (!check_pmp_reg_index(env, reg_index)) {
3030 return RISCV_EXCP_ILLEGAL_INST;
3031 }
3032 pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val);
3033 return RISCV_EXCP_NONE;
3034 }
3035
3036 static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
3037 target_ulong *val)
3038 {
3039 *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
3040 return RISCV_EXCP_NONE;
3041 }
3042
3043 static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
3044 target_ulong val)
3045 {
3046 pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
3047 return RISCV_EXCP_NONE;
3048 }
3049
3050 static RISCVException read_tselect(CPURISCVState *env, int csrno,
3051 target_ulong *val)
3052 {
3053 *val = tselect_csr_read(env);
3054 return RISCV_EXCP_NONE;
3055 }
3056
3057 static RISCVException write_tselect(CPURISCVState *env, int csrno,
3058 target_ulong val)
3059 {
3060 tselect_csr_write(env, val);
3061 return RISCV_EXCP_NONE;
3062 }
3063
3064 static RISCVException read_tdata(CPURISCVState *env, int csrno,
3065 target_ulong *val)
3066 {
3067 /* return 0 in tdata1 to end the trigger enumeration */
3068 if (env->trigger_cur >= TRIGGER_NUM && csrno == CSR_TDATA1) {
3069 *val = 0;
3070 return RISCV_EXCP_NONE;
3071 }
3072
3073 if (!tdata_available(env, csrno - CSR_TDATA1)) {
3074 return RISCV_EXCP_ILLEGAL_INST;
3075 }
3076
3077 *val = tdata_csr_read(env, csrno - CSR_TDATA1);
3078 return RISCV_EXCP_NONE;
3079 }
3080
3081 static RISCVException write_tdata(CPURISCVState *env, int csrno,
3082 target_ulong val)
3083 {
3084 if (!tdata_available(env, csrno - CSR_TDATA1)) {
3085 return RISCV_EXCP_ILLEGAL_INST;
3086 }
3087
3088 tdata_csr_write(env, csrno - CSR_TDATA1, val);
3089 return RISCV_EXCP_NONE;
3090 }
3091
3092 /*
3093 * Functions to access Pointer Masking feature registers
3094 * We have to check whether the current privilege level is allowed to
3095 * modify the CSR for the given mode
3096 */
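/*
 * Bits [9:8] of the CSR number encode the lowest privilege level that may
 * access it, e.g. get_field(0x300, 0x300) == PRV_M for mstatus-class CSRs
 * and get_field(0x100, 0x300) == PRV_S for sstatus-class ones, which is
 * what the csr_priv extraction below relies on.
 */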
3097 static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
3098 {
3099 int csr_priv = get_field(csrno, 0x300);
3100 int pm_current;
3101
3102 if (env->debugger) {
3103 return false;
3104 }
3105 /*
3106 * If the privilege levels differ, we are accessing the CSR from a higher
3107 * privilege level, so allow the access
3108 */
3109 if (env->priv != csr_priv) {
3110 return false;
3111 }
3112 switch (env->priv) {
3113 case PRV_M:
3114 pm_current = get_field(env->mmte, M_PM_CURRENT);
3115 break;
3116 case PRV_S:
3117 pm_current = get_field(env->mmte, S_PM_CURRENT);
3118 break;
3119 case PRV_U:
3120 pm_current = get_field(env->mmte, U_PM_CURRENT);
3121 break;
3122 default:
3123 g_assert_not_reached();
3124 }
3125 /* Same privilege level, so the CSR may only be modified if pm.current==1 */
3126 return !pm_current;
3127 }
3128
3129 static RISCVException read_mmte(CPURISCVState *env, int csrno,
3130 target_ulong *val)
3131 {
3132 *val = env->mmte & MMTE_MASK;
3133 return RISCV_EXCP_NONE;
3134 }
3135
3136 static RISCVException write_mmte(CPURISCVState *env, int csrno,
3137 target_ulong val)
3138 {
3139 uint64_t mstatus;
3140 target_ulong wpri_val = val & MMTE_MASK;
3141
3142 if (val != wpri_val) {
3143 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
3144 "MMTE: WPRI violation written 0x", val,
3145 "vs expected 0x", wpri_val);
3146 }
3147 /* for machine mode pm.current is hardwired to 1 */
3148 wpri_val |= MMTE_M_PM_CURRENT;
3149
3150 /* hardwiring pm.instruction bit to 0, since it's not supported yet */
3151 wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
3152 env->mmte = wpri_val | PM_EXT_DIRTY;
3153 riscv_cpu_update_mask(env);
3154
3155 /* Set XS and SD bits, since PM CSRs are dirty */
3156 mstatus = env->mstatus | MSTATUS_XS;
3157 write_mstatus(env, csrno, mstatus);
3158 return RISCV_EXCP_NONE;
3159 }
3160
3161 static RISCVException read_smte(CPURISCVState *env, int csrno,
3162 target_ulong *val)
3163 {
3164 *val = env->mmte & SMTE_MASK;
3165 return RISCV_EXCP_NONE;
3166 }
3167
3168 static RISCVException write_smte(CPURISCVState *env, int csrno,
3169 target_ulong val)
3170 {
3171 target_ulong wpri_val = val & SMTE_MASK;
3172
3173 if (val != wpri_val) {
3174 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
3175 "SMTE: WPRI violation written 0x", val,
3176 "vs expected 0x", wpri_val);
3177 }
3178
3179 /* if pm.current==0 we can't modify current PM CSRs */
3180 if (check_pm_current_disabled(env, csrno)) {
3181 return RISCV_EXCP_NONE;
3182 }
3183
3184 wpri_val |= (env->mmte & ~SMTE_MASK);
3185 write_mmte(env, csrno, wpri_val);
3186 return RISCV_EXCP_NONE;
3187 }
3188
3189 static RISCVException read_umte(CPURISCVState *env, int csrno,
3190 target_ulong *val)
3191 {
3192 *val = env->mmte & UMTE_MASK;
3193 return RISCV_EXCP_NONE;
3194 }
3195
3196 static RISCVException write_umte(CPURISCVState *env, int csrno,
3197 target_ulong val)
3198 {
3199 target_ulong wpri_val = val & UMTE_MASK;
3200
3201 if (val != wpri_val) {
3202 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
3203 "UMTE: WPRI violation written 0x", val,
3204 "vs expected 0x", wpri_val);
3205 }
3206
3207 if (check_pm_current_disabled(env, csrno)) {
3208 return RISCV_EXCP_NONE;
3209 }
3210
3211 wpri_val |= (env->mmte & ~UMTE_MASK);
3212 write_mmte(env, csrno, wpri_val);
3213 return RISCV_EXCP_NONE;
3214 }
3215
3216 static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
3217 target_ulong *val)
3218 {
3219 *val = env->mpmmask;
3220 return RISCV_EXCP_NONE;
3221 }
3222
3223 static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
3224 target_ulong val)
3225 {
3226 uint64_t mstatus;
3227
3228 env->mpmmask = val;
3229 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
3230 env->cur_pmmask = val;
3231 }
3232 env->mmte |= PM_EXT_DIRTY;
3233
3234 /* Set XS and SD bits, since PM CSRs are dirty */
3235 mstatus = env->mstatus | MSTATUS_XS;
3236 write_mstatus(env, csrno, mstatus);
3237 return RISCV_EXCP_NONE;
3238 }
3239
3240 static RISCVException read_spmmask(CPURISCVState *env, int csrno,
3241 target_ulong *val)
3242 {
3243 *val = env->spmmask;
3244 return RISCV_EXCP_NONE;
3245 }
3246
3247 static RISCVException write_spmmask(CPURISCVState *env, int csrno,
3248 target_ulong val)
3249 {
3250 uint64_t mstatus;
3251
3252 /* if pm.current==0 we can't modify current PM CSRs */
3253 if (check_pm_current_disabled(env, csrno)) {
3254 return RISCV_EXCP_NONE;
3255 }
3256 env->spmmask = val;
3257 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
3258 env->cur_pmmask = val;
3259 }
3260 env->mmte |= PM_EXT_DIRTY;
3261
3262 /* Set XS and SD bits, since PM CSRs are dirty */
3263 mstatus = env->mstatus | MSTATUS_XS;
3264 write_mstatus(env, csrno, mstatus);
3265 return RISCV_EXCP_NONE;
3266 }
3267
3268 static RISCVException read_upmmask(CPURISCVState *env, int csrno,
3269 target_ulong *val)
3270 {
3271 *val = env->upmmask;
3272 return RISCV_EXCP_NONE;
3273 }
3274
3275 static RISCVException write_upmmask(CPURISCVState *env, int csrno,
3276 target_ulong val)
3277 {
3278 uint64_t mstatus;
3279
3280 /* if pm.current==0 we can't modify current PM CSRs */
3281 if (check_pm_current_disabled(env, csrno)) {
3282 return RISCV_EXCP_NONE;
3283 }
3284 env->upmmask = val;
3285 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
3286 env->cur_pmmask = val;
3287 }
3288 env->mmte |= PM_EXT_DIRTY;
3289
3290 /* Set XS and SD bits, since PM CSRs are dirty */
3291 mstatus = env->mstatus | MSTATUS_XS;
3292 write_mstatus(env, csrno, mstatus);
3293 return RISCV_EXCP_NONE;
3294 }
3295
3296 static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
3297 target_ulong *val)
3298 {
3299 *val = env->mpmbase;
3300 return RISCV_EXCP_NONE;
3301 }
3302
3303 static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
3304 target_ulong val)
3305 {
3306 uint64_t mstatus;
3307
3308 env->mpmbase = val;
3309 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
3310 env->cur_pmbase = val;
3311 }
3312 env->mmte |= PM_EXT_DIRTY;
3313
3314 /* Set XS and SD bits, since PM CSRs are dirty */
3315 mstatus = env->mstatus | MSTATUS_XS;
3316 write_mstatus(env, csrno, mstatus);
3317 return RISCV_EXCP_NONE;
3318 }
3319
3320 static RISCVException read_spmbase(CPURISCVState *env, int csrno,
3321 target_ulong *val)
3322 {
3323 *val = env->spmbase;
3324 return RISCV_EXCP_NONE;
3325 }
3326
3327 static RISCVException write_spmbase(CPURISCVState *env, int csrno,
3328 target_ulong val)
3329 {
3330 uint64_t mstatus;
3331
3332 /* if pm.current==0 we can't modify current PM CSRs */
3333 if (check_pm_current_disabled(env, csrno)) {
3334 return RISCV_EXCP_NONE;
3335 }
3336 env->spmbase = val;
3337 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
3338 env->cur_pmbase = val;
3339 }
3340 env->mmte |= PM_EXT_DIRTY;
3341
3342 /* Set XS and SD bits, since PM CSRs are dirty */
3343 mstatus = env->mstatus | MSTATUS_XS;
3344 write_mstatus(env, csrno, mstatus);
3345 return RISCV_EXCP_NONE;
3346 }
3347
3348 static RISCVException read_upmbase(CPURISCVState *env, int csrno,
3349 target_ulong *val)
3350 {
3351 *val = env->upmbase;
3352 return RISCV_EXCP_NONE;
3353 }
3354
3355 static RISCVException write_upmbase(CPURISCVState *env, int csrno,
3356 target_ulong val)
3357 {
3358 uint64_t mstatus;
3359
3360 /* if pm.current==0 we can't modify current PM CSRs */
3361 if (check_pm_current_disabled(env, csrno)) {
3362 return RISCV_EXCP_NONE;
3363 }
3364 env->upmbase = val;
3365 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
3366 env->cur_pmbase = val;
3367 }
3368 env->mmte |= PM_EXT_DIRTY;
3369
3370 /* Set XS and SD bits, since PM CSRs are dirty */
3371 mstatus = env->mstatus | MSTATUS_XS;
3372 write_mstatus(env, csrno, mstatus);
3373 return RISCV_EXCP_NONE;
3374 }
3375
3376 #endif
3377
3378 /* Crypto Extension */
3379 static RISCVException rmw_seed(CPURISCVState *env, int csrno,
3380 target_ulong *ret_value,
3381 target_ulong new_value,
3382 target_ulong write_mask)
3383 {
3384 uint16_t random_v;
3385 Error *random_e = NULL;
3386 int random_r;
3387 target_ulong rval;
3388
3389 random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
3390 if (unlikely(random_r < 0)) {
3391 /*
3392 * Failed, for unknown reasons in the crypto subsystem.
3393 * The best we can do is log the reason and return a
3394 * failure indication to the guest. There is no reason
3395 * we know to expect the failure to be transitory, so
3396 * indicate DEAD to avoid having the guest spin on WAIT.
3397 */
3398 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s\n",
3399 __func__, error_get_pretty(random_e));
3400 error_free(random_e);
3401 rval = SEED_OPST_DEAD;
3402 } else {
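/* success: 16 bits of fresh entropy in the low half plus the ES16 status */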
3403 rval = random_v | SEED_OPST_ES16;
3404 }
3405
3406 if (ret_value) {
3407 *ret_value = rval;
3408 }
3409
3410 return RISCV_EXCP_NONE;
3411 }
3412
3413 /*
3414 * riscv_csrrw - read and/or update control and status register
3415 *
3416 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
3417 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
3418 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
3419 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
3420 */
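/*
 * For example, a csrrs-style read-and-set of sstatus can be expressed as
 * (illustrative only; the instruction decoder builds the mask from rs1):
 *
 *     target_ulong old;
 *     riscv_csrrw(env, CSR_SSTATUS, &old, -1, set_bits);
 *
 * while a plain csrr read passes a zero write mask, so no write callback
 * is invoked.
 */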
3421
3422 static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
3423 int csrno,
3424 bool write_mask,
3425 RISCVCPU *cpu)
3426 {
3427 /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
3428 int read_only = get_field(csrno, 0xC00) == 3;
3429 int csr_min_priv = csr_ops[csrno].min_priv_ver;
3430
3431 /* ensure the CSR extension is enabled. */
3432 if (!cpu->cfg.ext_icsr) {
3433 return RISCV_EXCP_ILLEGAL_INST;
3434 }
3435
3436 if (env->priv_ver < csr_min_priv) {
3437 return RISCV_EXCP_ILLEGAL_INST;
3438 }
3439
3440 /* check predicate */
3441 if (!csr_ops[csrno].predicate) {
3442 return RISCV_EXCP_ILLEGAL_INST;
3443 }
3444
3445 if (write_mask && read_only) {
3446 return RISCV_EXCP_ILLEGAL_INST;
3447 }
3448
3449 RISCVException ret = csr_ops[csrno].predicate(env, csrno);
3450 if (ret != RISCV_EXCP_NONE) {
3451 return ret;
3452 }
3453
3454 #if !defined(CONFIG_USER_ONLY)
3455 int csr_priv, effective_priv = env->priv;
3456
3457 if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
3458 !riscv_cpu_virt_enabled(env)) {
3459 /*
3460 * We are in HS mode. Add 1 to the effective privilege level to
3461 * allow us to access the Hypervisor CSRs.
3462 */
3463 effective_priv++;
3464 }
3465
3466 csr_priv = get_field(csrno, 0x300);
3467 if (!env->debugger && (effective_priv < csr_priv)) {
3468 if (csr_priv == (PRV_S + 1) && riscv_cpu_virt_enabled(env)) {
3469 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
3470 }
3471 return RISCV_EXCP_ILLEGAL_INST;
3472 }
3473 #endif
3474 return RISCV_EXCP_NONE;
3475 }
3476
3477 static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
3478 target_ulong *ret_value,
3479 target_ulong new_value,
3480 target_ulong write_mask)
3481 {
3482 RISCVException ret;
3483 target_ulong old_value;
3484
3485 /* execute combined read/write operation if it exists */
3486 if (csr_ops[csrno].op) {
3487 return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
3488 }
3489
3490 /* if no accessor exists then return failure */
3491 if (!csr_ops[csrno].read) {
3492 return RISCV_EXCP_ILLEGAL_INST;
3493 }
3494 /* read old value */
3495 ret = csr_ops[csrno].read(env, csrno, &old_value);
3496 if (ret != RISCV_EXCP_NONE) {
3497 return ret;
3498 }
3499
3500 /* write value if writable and write mask set, otherwise drop writes */
3501 if (write_mask) {
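/* e.g. write_mask 0x0f, old_value 0xaa, new_value 0x05 composes to 0xa5 */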
3502 new_value = (old_value & ~write_mask) | (new_value & write_mask);
3503 if (csr_ops[csrno].write) {
3504 ret = csr_ops[csrno].write(env, csrno, new_value);
3505 if (ret != RISCV_EXCP_NONE) {
3506 return ret;
3507 }
3508 }
3509 }
3510
3511 /* return old value */
3512 if (ret_value) {
3513 *ret_value = old_value;
3514 }
3515
3516 return RISCV_EXCP_NONE;
3517 }
3518
3519 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
3520 target_ulong *ret_value,
3521 target_ulong new_value, target_ulong write_mask)
3522 {
3523 RISCVCPU *cpu = env_archcpu(env);
3524
3525 RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu);
3526 if (ret != RISCV_EXCP_NONE) {
3527 return ret;
3528 }
3529
3530 return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
3531 }
3532
3533 static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
3534 Int128 *ret_value,
3535 Int128 new_value,
3536 Int128 write_mask)
3537 {
3538 RISCVException ret;
3539 Int128 old_value;
3540
3541 /* read old value */
3542 ret = csr_ops[csrno].read128(env, csrno, &old_value);
3543 if (ret != RISCV_EXCP_NONE) {
3544 return ret;
3545 }
3546
3547 /* write value if writable and write mask set, otherwise drop writes */
3548 if (int128_nz(write_mask)) {
3549 new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
3550 int128_and(new_value, write_mask));
3551 if (csr_ops[csrno].write128) {
3552 ret = csr_ops[csrno].write128(env, csrno, new_value);
3553 if (ret != RISCV_EXCP_NONE) {
3554 return ret;
3555 }
3556 } else if (csr_ops[csrno].write) {
3557 /* avoids having to write wrappers for all registers */
3558 ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
3559 if (ret != RISCV_EXCP_NONE) {
3560 return ret;
3561 }
3562 }
3563 }
3564
3565 /* return old value */
3566 if (ret_value) {
3567 *ret_value = old_value;
3568 }
3569
3570 return RISCV_EXCP_NONE;
3571 }
3572
3573 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
3574 Int128 *ret_value,
3575 Int128 new_value, Int128 write_mask)
3576 {
3577 RISCVException ret;
3578 RISCVCPU *cpu = env_archcpu(env);
3579
3580 ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu);
3581 if (ret != RISCV_EXCP_NONE) {
3582 return ret;
3583 }
3584
3585 if (csr_ops[csrno].read128) {
3586 return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
3587 }
3588
3589 /*
3590 * Fall back to the 64-bit version for now, if the 128-bit alternative
3591 * isn't defined at all.
3592 * Note that some CSRs don't need to extend to MXLEN (their upper 64 bits
3593 * are not significant); for those, this fallback handles the access correctly.
3594 */
3595 target_ulong old_value;
3596 ret = riscv_csrrw_do64(env, csrno, &old_value,
3597 int128_getlo(new_value),
3598 int128_getlo(write_mask));
3599 if (ret == RISCV_EXCP_NONE && ret_value) {
3600 *ret_value = int128_make64(old_value);
3601 }
3602 return ret;
3603 }
3604
3605 /*
3606 * Debugger support. If not in user mode, set env->debugger before the
3607 * riscv_csrrw call and clear it after the call.
3608 */
3609 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
3610 target_ulong *ret_value,
3611 target_ulong new_value,
3612 target_ulong write_mask)
3613 {
3614 RISCVException ret;
3615 #if !defined(CONFIG_USER_ONLY)
3616 env->debugger = true;
3617 #endif
3618 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
3619 #if !defined(CONFIG_USER_ONLY)
3620 env->debugger = false;
3621 #endif
3622 return ret;
3623 }
3624
3625 /* Control and Status Register function table */
3626 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
3627 /* User Floating-Point CSRs */
3628 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
3629 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
3630 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
3631 /* Vector CSRs */
3632 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart,
3633 .min_priv_ver = PRIV_VERSION_1_12_0 },
3634 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat,
3635 .min_priv_ver = PRIV_VERSION_1_12_0 },
3636 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm,
3637 .min_priv_ver = PRIV_VERSION_1_12_0 },
3638 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr,
3639 .min_priv_ver = PRIV_VERSION_1_12_0 },
3640 [CSR_VL] = { "vl", vs, read_vl,
3641 .min_priv_ver = PRIV_VERSION_1_12_0 },
3642 [CSR_VTYPE] = { "vtype", vs, read_vtype,
3643 .min_priv_ver = PRIV_VERSION_1_12_0 },
3644 [CSR_VLENB] = { "vlenb", vs, read_vlenb,
3645 .min_priv_ver = PRIV_VERSION_1_12_0 },
3646 /* User Timers and Counters */
3647 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
3648 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter },
3649 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh },
3650 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh },
3651
3652 /*
3653 * In privileged mode, the monitor will have to emulate TIME CSRs only if
3654 * rdtime callback is not provided by machine/platform emulation.
3655 */
3656 [CSR_TIME] = { "time", ctr, read_time },
3657 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
3658
3659 /* Crypto Extension */
3660 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },
3661
3662 #if !defined(CONFIG_USER_ONLY)
3663 /* Machine Timers and Counters */
3664 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
3665 write_mhpmcounter },
3666 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter,
3667 write_mhpmcounter },
3668 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh,
3669 write_mhpmcounterh },
3670 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh,
3671 write_mhpmcounterh },
3672
3673 /* Machine Information Registers */
3674 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
3675 [CSR_MARCHID] = { "marchid", any, read_marchid },
3676 [CSR_MIMPID] = { "mimpid", any, read_mimpid },
3677 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
3678
3679 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero,
3680 .min_priv_ver = PRIV_VERSION_1_12_0 },
3681 /* Machine Trap Setup */
3682 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus,
3683 NULL, read_mstatus_i128 },
3684 [CSR_MISA] = { "misa", any, read_misa, write_misa,
3685 NULL, read_misa_i128 },
3686 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg },
3687 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
3688 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
3689 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
3690 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren,
3691 write_mcounteren },
3692
3693 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush,
3694 write_mstatush },
3695
3696 /* Machine Trap Handling */
3697 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch,
3698 NULL, read_mscratch_i128, write_mscratch_i128 },
3699 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
3700 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
3701 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
3702 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
3703
3704 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
3705 [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
3706 [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
3707
3708 /* Machine-Level Interrupts (AIA) */
3709 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
3710 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
3711
3712 /* Virtual Interrupts for Supervisor Level (AIA) */
3713 [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore },
3714 [CSR_MVIP] = { "mvip", aia_any, read_zero, write_ignore },
3715
3716 /* Machine-Level High-Half CSRs (AIA) */
3717 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
3718 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
3719 [CSR_MVIENH] = { "mvienh", aia_any32, read_zero, write_ignore },
3720 [CSR_MVIPH] = { "mviph", aia_any32, read_zero, write_ignore },
3721 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
3722
3723 /* Execution environment configuration */
3724 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg,
3725 .min_priv_ver = PRIV_VERSION_1_12_0 },
3726 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh,
3727 .min_priv_ver = PRIV_VERSION_1_12_0 },
3728 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg,
3729 .min_priv_ver = PRIV_VERSION_1_12_0 },
3730 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg,
3731 .min_priv_ver = PRIV_VERSION_1_12_0 },
3732 [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
3733 .min_priv_ver = PRIV_VERSION_1_12_0 },
3734
3735 /* Supervisor Trap Setup */
3736 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
3737 NULL, read_sstatus_i128 },
3738 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
3739 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
3740 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren,
3741 write_scounteren },
3742
3743 /* Supervisor Trap Handling */
3744 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch,
3745 NULL, read_sscratch_i128, write_sscratch_i128 },
3746 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
3747 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
3748 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
3749 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
3750 [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp,
3751 .min_priv_ver = PRIV_VERSION_1_12_0 },
3752 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph,
3753 .min_priv_ver = PRIV_VERSION_1_12_0 },
3754 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp,
3755 write_vstimecmp,
3756 .min_priv_ver = PRIV_VERSION_1_12_0 },
3757 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph,
3758 write_vstimecmph,
3759 .min_priv_ver = PRIV_VERSION_1_12_0 },
3760
3761 /* Supervisor Protection and Translation */
3762 [CSR_SATP] = { "satp", smode, read_satp, write_satp },
3763
3764 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
3765 [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
3766 [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
3767
3768 /* Supervisor-Level Interrupts (AIA) */
3769 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
3770 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
3771
3772 /* Supervisor-Level High-Half CSRs (AIA) */
3773 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
3774 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
3775
3776 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus,
3777 .min_priv_ver = PRIV_VERSION_1_12_0 },
3778 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg,
3779 .min_priv_ver = PRIV_VERSION_1_12_0 },
3780 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg,
3781 .min_priv_ver = PRIV_VERSION_1_12_0 },
3782 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip,
3783 .min_priv_ver = PRIV_VERSION_1_12_0 },
3784 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip,
3785 .min_priv_ver = PRIV_VERSION_1_12_0 },
3786 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie,
3787 .min_priv_ver = PRIV_VERSION_1_12_0 },
3788 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren,
3789 write_hcounteren,
3790 .min_priv_ver = PRIV_VERSION_1_12_0 },
3791 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie,
3792 .min_priv_ver = PRIV_VERSION_1_12_0 },
3793 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval,
3794 .min_priv_ver = PRIV_VERSION_1_12_0 },
3795 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst,
3796 .min_priv_ver = PRIV_VERSION_1_12_0 },
3797 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip,
3798 .min_priv_ver = PRIV_VERSION_1_12_0 },
3799 [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp,
3800 .min_priv_ver = PRIV_VERSION_1_12_0 },
3801 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta,
3802 write_htimedelta,
3803 .min_priv_ver = PRIV_VERSION_1_12_0 },
3804 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah,
3805 write_htimedeltah,
3806 .min_priv_ver = PRIV_VERSION_1_12_0 },
3807
3808 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus,
3809 write_vsstatus,
3810 .min_priv_ver = PRIV_VERSION_1_12_0 },
3811 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip,
3812 .min_priv_ver = PRIV_VERSION_1_12_0 },
3813 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie,
3814 .min_priv_ver = PRIV_VERSION_1_12_0 },
3815 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec,
3816 .min_priv_ver = PRIV_VERSION_1_12_0 },
3817 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch,
3818 write_vsscratch,
3819 .min_priv_ver = PRIV_VERSION_1_12_0 },
3820 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc,
3821 .min_priv_ver = PRIV_VERSION_1_12_0 },
3822 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause,
3823 .min_priv_ver = PRIV_VERSION_1_12_0 },
3824 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval,
3825 .min_priv_ver = PRIV_VERSION_1_12_0 },
3826 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
3827 .min_priv_ver = PRIV_VERSION_1_12_0 },
3828
3829 [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
3830 .min_priv_ver = PRIV_VERSION_1_12_0 },
3831 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
3832 .min_priv_ver = PRIV_VERSION_1_12_0 },
3833
3834 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
3835 [CSR_HVIEN] = { "hvien", aia_hmode, read_zero, write_ignore },
3836 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl,
3837 write_hvictl },
3838 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1,
3839 write_hviprio1 },
3840 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2,
3841 write_hviprio2 },
3842
3843 /*
3844 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
3845 */
3846 [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL,
3847 rmw_xiselect },
3848 [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
3849
3850 /* VS-Level Interrupts (H-extension with AIA) */
3851 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
3852 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
3853
3854 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
3855 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL,
3856 rmw_hidelegh },
3857 [CSR_HVIENH] = { "hvienh", aia_hmode32, read_zero,
3858 write_ignore },
3859 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
3860 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h,
3861 write_hviprio1h },
3862 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h,
3863 write_hviprio2h },
3864 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
3865 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
3866
3867 /* Physical Memory Protection */
3868 [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg,
3869 .min_priv_ver = PRIV_VERSION_1_11_0 },
3870 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
3871 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
3872 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
3873 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
3874 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
3875 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
3876 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
3877 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
3878 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
3879 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
3880 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
3881 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
3882 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
3883 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
3884 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
3885 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
3886 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
3887 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
3888 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
3889 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
3890
3891 /* Debug CSRs */
3892 [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
3893 [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
3894 [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
3895 [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
3896
3897 /* User Pointer Masking */
3898 [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
3899 [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask,
3900 write_upmmask },
3901 [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase,
3902 write_upmbase },
3903 /* Machine Pointer Masking */
3904 [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
3905 [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask,
3906 write_mpmmask },
3907 [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase,
3908 write_mpmbase },
3909 /* Supervisor Pointer Masking */
3910 [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
3911 [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask,
3912 write_spmmask },
3913 [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase,
3914 write_spmbase },
3915
3916 /* Performance Counters */
3917 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
3918 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
3919 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter },
3920 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter },
3921 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter },
3922 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter },
3923 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter },
3924 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter },
3925 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter },
3926 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter },
3927 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter },
3928 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter },
3929 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter },
3930 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter },
3931 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter },
3932 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter },
3933 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter },
3934 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter },
3935 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter },
3936 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter },
3937 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter },
3938 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter },
3939 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter },
3940 [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter },
3941 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter },
3942 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter },
3943 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter },
3944 [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter },
3945 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter },
3946
3947 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter,
3948 write_mhpmcounter },
3949 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter,
3950 write_mhpmcounter },
3951 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter,
3952 write_mhpmcounter },
3953 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter,
3954 write_mhpmcounter },
3955 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter,
3956 write_mhpmcounter },
3957 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter,
3958 write_mhpmcounter },
3959 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter,
3960 write_mhpmcounter },
3961 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter,
3962 write_mhpmcounter },
3963 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter,
3964 write_mhpmcounter },
3965 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter,
3966 write_mhpmcounter },
3967 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter,
3968 write_mhpmcounter },
3969 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter,
3970 write_mhpmcounter },
3971 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter,
3972 write_mhpmcounter },
3973 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter,
3974 write_mhpmcounter },
3975 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter,
3976 write_mhpmcounter },
3977 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter,
3978 write_mhpmcounter },
3979 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter,
3980 write_mhpmcounter },
3981 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter,
3982 write_mhpmcounter },
3983 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter,
3984 write_mhpmcounter },
3985 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter,
3986 write_mhpmcounter },
3987 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter,
3988 write_mhpmcounter },
3989 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter,
3990 write_mhpmcounter },
3991 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter,
3992 write_mhpmcounter },
3993 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter,
3994 write_mhpmcounter },
3995 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter,
3996 write_mhpmcounter },
3997 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter,
3998 write_mhpmcounter },
3999 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter,
4000 write_mhpmcounter },
4001 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter,
4002 write_mhpmcounter },
4003 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter,
4004 write_mhpmcounter },
4005
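/* Machine Counter-Inhibit Register */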
4006 [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
4007 write_mcountinhibit,
4008 .min_priv_ver = PRIV_VERSION_1_11_0 },
4009
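/* Machine Performance Event Selectors */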
4010 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
4011 write_mhpmevent },
4012 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
4013 write_mhpmevent },
4014 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent,
4015 write_mhpmevent },
4016 [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent,
4017 write_mhpmevent },
4018 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent,
4019 write_mhpmevent },
4020 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent,
4021 write_mhpmevent },
4022 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent,
4023 write_mhpmevent },
4024 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent,
4025 write_mhpmevent },
4026 [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent,
4027 write_mhpmevent },
4028 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent,
4029 write_mhpmevent },
4030 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent,
4031 write_mhpmevent },
4032 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent,
4033 write_mhpmevent },
4034 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent,
4035 write_mhpmevent },
4036 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent,
4037 write_mhpmevent },
4038 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent,
4039 write_mhpmevent },
4040 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent,
4041 write_mhpmevent },
4042 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent,
4043 write_mhpmevent },
4044 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent,
4045 write_mhpmevent },
4046 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent,
4047 write_mhpmevent },
4048 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent,
4049 write_mhpmevent },
4050 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent,
4051 write_mhpmevent },
4052 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent,
4053 write_mhpmevent },
4054 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent,
4055 write_mhpmevent },
4056 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent,
4057 write_mhpmevent },
4058 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent,
4059 write_mhpmevent },
4060 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent,
4061 write_mhpmevent },
4062 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent,
4063 write_mhpmevent },
4064 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent,
4065 write_mhpmevent },
4066 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
4067 write_mhpmevent },
4068
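/* Sscofpmf: RV32 upper halves of mhpmevent3..31 */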
4069 [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf, read_mhpmeventh,
4070 write_mhpmeventh,
4071 .min_priv_ver = PRIV_VERSION_1_12_0 },
4072 [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf, read_mhpmeventh,
4073 write_mhpmeventh,
4074 .min_priv_ver = PRIV_VERSION_1_12_0 },
4075 [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf, read_mhpmeventh,
4076 write_mhpmeventh,
4077 .min_priv_ver = PRIV_VERSION_1_12_0 },
4078 [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf, read_mhpmeventh,
4079 write_mhpmeventh,
4080 .min_priv_ver = PRIV_VERSION_1_12_0 },
4081 [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf, read_mhpmeventh,
4082 write_mhpmeventh,
4083 .min_priv_ver = PRIV_VERSION_1_12_0 },
4084 [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf, read_mhpmeventh,
4085 write_mhpmeventh,
4086 .min_priv_ver = PRIV_VERSION_1_12_0 },
4087 [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf, read_mhpmeventh,
4088 write_mhpmeventh,
4089 .min_priv_ver = PRIV_VERSION_1_12_0 },
4090 [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf, read_mhpmeventh,
4091 write_mhpmeventh,
4092 .min_priv_ver = PRIV_VERSION_1_12_0 },
4093 [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf, read_mhpmeventh,
4094 write_mhpmeventh,
4095 .min_priv_ver = PRIV_VERSION_1_12_0 },
4096 [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf, read_mhpmeventh,
4097 write_mhpmeventh,
4098 .min_priv_ver = PRIV_VERSION_1_12_0 },
4099 [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf, read_mhpmeventh,
4100 write_mhpmeventh,
4101 .min_priv_ver = PRIV_VERSION_1_12_0 },
4102 [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf, read_mhpmeventh,
4103 write_mhpmeventh,
4104 .min_priv_ver = PRIV_VERSION_1_12_0 },
4105 [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf, read_mhpmeventh,
4106 write_mhpmeventh,
4107 .min_priv_ver = PRIV_VERSION_1_12_0 },
4108 [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf, read_mhpmeventh,
4109 write_mhpmeventh,
4110 .min_priv_ver = PRIV_VERSION_1_12_0 },
4111 [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf, read_mhpmeventh,
4112 write_mhpmeventh,
4113 .min_priv_ver = PRIV_VERSION_1_12_0 },
4114 [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf, read_mhpmeventh,
4115 write_mhpmeventh,
4116 .min_priv_ver = PRIV_VERSION_1_12_0 },
4117 [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf, read_mhpmeventh,
4118 write_mhpmeventh,
4119 .min_priv_ver = PRIV_VERSION_1_12_0 },
4120 [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf, read_mhpmeventh,
4121 write_mhpmeventh,
4122 .min_priv_ver = PRIV_VERSION_1_12_0 },
4123 [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf, read_mhpmeventh,
4124 write_mhpmeventh,
4125 .min_priv_ver = PRIV_VERSION_1_12_0 },
4126 [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf, read_mhpmeventh,
4127 write_mhpmeventh,
4128 .min_priv_ver = PRIV_VERSION_1_12_0 },
4129 [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf, read_mhpmeventh,
4130 write_mhpmeventh,
4131 .min_priv_ver = PRIV_VERSION_1_12_0 },
4132 [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf, read_mhpmeventh,
4133 write_mhpmeventh,
4134 .min_priv_ver = PRIV_VERSION_1_12_0 },
4135 [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf, read_mhpmeventh,
4136 write_mhpmeventh,
4137 .min_priv_ver = PRIV_VERSION_1_12_0 },
4138 [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf, read_mhpmeventh,
4139 write_mhpmeventh,
4140 .min_priv_ver = PRIV_VERSION_1_12_0 },
4141 [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf, read_mhpmeventh,
4142 write_mhpmeventh,
4143 .min_priv_ver = PRIV_VERSION_1_12_0 },
4144 [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf, read_mhpmeventh,
4145 write_mhpmeventh,
4146 .min_priv_ver = PRIV_VERSION_1_12_0 },
4147 [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf, read_mhpmeventh,
4148 write_mhpmeventh,
4149 .min_priv_ver = PRIV_VERSION_1_12_0 },
4150 [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf, read_mhpmeventh,
4151 write_mhpmeventh,
4152 .min_priv_ver = PRIV_VERSION_1_12_0 },
4153 [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf, read_mhpmeventh,
4154 write_mhpmeventh,
4155 .min_priv_ver = PRIV_VERSION_1_12_0 },
4156
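/* RV32 high halves of the unprivileged performance counters */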
4157 [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh },
4158 [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh },
4159 [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh },
4160 [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh },
4161 [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh },
4162 [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh },
4163 [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh },
4164 [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh },
4165 [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh },
4166 [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh },
4167 [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh },
4168 [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh },
4169 [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh },
4170 [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh },
4171 [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh },
4172 [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh },
4173 [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh },
4174 [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh },
4175 [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh },
4176 [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh },
4177 [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh },
4178 [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh },
4179 [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh },
4180 [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh },
4181 [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh },
4182 [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh },
4183 [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh },
4184 [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh },
4185 [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh },
4186
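/* RV32 high halves of the machine performance counters (M-mode writable) */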
4187 [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh,
4188 write_mhpmcounterh },
4189 [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh,
4190 write_mhpmcounterh },
4191 [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh,
4192 write_mhpmcounterh },
4193 [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh,
4194 write_mhpmcounterh },
4195 [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh,
4196 write_mhpmcounterh },
4197 [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh,
4198 write_mhpmcounterh },
4199 [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh,
4200 write_mhpmcounterh },
4201 [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh,
4202 write_mhpmcounterh },
4203 [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh,
4204 write_mhpmcounterh },
4205 [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh,
4206 write_mhpmcounterh },
4207 [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh,
4208 write_mhpmcounterh },
4209 [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh,
4210 write_mhpmcounterh },
4211 [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh,
4212 write_mhpmcounterh },
4213 [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh,
4214 write_mhpmcounterh },
4215 [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh,
4216 write_mhpmcounterh },
4217 [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh,
4218 write_mhpmcounterh },
4219 [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh,
4220 write_mhpmcounterh },
4221 [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh,
4222 write_mhpmcounterh },
4223 [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh,
4224 write_mhpmcounterh },
4225 [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh,
4226 write_mhpmcounterh },
4227 [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh,
4228 write_mhpmcounterh },
4229 [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh,
4230 write_mhpmcounterh },
4231 [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh,
4232 write_mhpmcounterh },
4233 [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh,
4234 write_mhpmcounterh },
4235 [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh,
4236 write_mhpmcounterh },
4237 [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh,
4238 write_mhpmcounterh },
4239 [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh,
4240 write_mhpmcounterh },
4241 [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh,
4242 write_mhpmcounterh },
4243 [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh,
4244 write_mhpmcounterh },
4245 [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf,
4246 .min_priv_ver = PRIV_VERSION_1_12_0 },
4247
4248 #endif /* !CONFIG_USER_ONLY */
4249 };
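/*
 * Illustrative sketch only, not part of the upstream table: a hypothetical
 * additional CSR would follow the same entry shape, assuming CSR_FOO and its
 * read_foo/write_foo helpers existed:
 *
 *     [CSR_FOO] = { "foo", any, read_foo, write_foo,
 *                   .min_priv_ver = PRIV_VERSION_1_12_0 },
 *
 * Each entry names the CSR, gives a predicate that gates access, optional
 * read/write callbacks, and an optional .min_priv_ver that restricts the CSR
 * to newer privileged-spec versions.
 */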