/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "internals.h"
#include "pmu.h"
#include "exec/exec-all.h"
#include "instmap.h"
#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"
#include "sysemu/cpu-timers.h"
#include "cpu_bits.h"
#include "debug.h"
#include "tcg/oversized-guest.h"
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    bool virt = env->virt_enabled;
    int mode = env->priv;

    /* All priv -> mmu_idx mapping are here */
    if (!ifetch) {
        uint64_t status = env->mstatus;

        if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
            virt = get_field(env->mstatus, MSTATUS_MPV) &&
                   (mode != PRV_M);
            if (virt) {
                status = env->vsstatus;
            }
        }

        if (mode == PRV_S && get_field(status, MSTATUS_SUM)) {
            mode = MMUIdx_S_SUM;
        }
    }

    return mode | (virt ? MMU_2STAGE_BIT : 0);
#endif
}
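/*
 * Illustrative example (not part of the original source): assuming the
 * MMUIdx_* encoding from internals.h, a supervisor-privilege data access
 * made from VS-mode resolves to PRV_S | MMU_2STAGE_BIT, so TLB entries
 * produced by two-stage translation never alias the HS-mode entries;
 * M-mode with MPRV=1 and MPP=PRV_U instead walks with user privilege.
 */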
void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
                          uint64_t *cs_base, uint32_t *pflags)
{
    RISCVCPU *cpu = env_archcpu(env);
    RISCVExtStatus fs, vs;
    uint32_t flags = 0;

    *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
    *cs_base = 0;

    if (cpu->cfg.ext_zve32f) {
        /*
         * If env->vl equals VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we set the vl_eq_vlmax flag to
         * true only when maxsz >= 8 bytes.
         */
        uint32_t vlmax = vext_get_vlmax(cpu, env->vtype);
        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t maxsz = vlmax << sew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
        flags = FIELD_DP32(flags, TB_FLAGS, VTA,
                           FIELD_EX64(env->vtype, VTYPE, VTA));
        flags = FIELD_DP32(flags, TB_FLAGS, VMA,
                           FIELD_EX64(env->vtype, VTYPE, VMA));
        flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

#ifdef CONFIG_USER_ONLY
    fs = EXT_STATUS_DIRTY;
    vs = EXT_STATUS_DIRTY;
#else
    flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);

    flags |= cpu_mmu_index(env, 0);
    fs = get_field(env->mstatus, MSTATUS_FS);
    vs = get_field(env->mstatus, MSTATUS_VS);

    if (env->virt_enabled) {
        flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
        /*
         * Merge DISABLED and !DIRTY states using MIN.
         * We will set both fields when dirtying.
         */
        fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
        vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
    }

    /* With Zfinx, floating point is enabled/disabled by Smstateen. */
    if (!riscv_has_ext(env, RVF)) {
        fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
             ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
    }

    if (cpu->cfg.debug && !icount_enabled()) {
        flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
    flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
    flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
    if (env->cur_pmmask != 0) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
    }
    if (env->cur_pmbase != 0) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
    }

    *pflags = flags;
}
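/*
 * Worked example (illustrative, not in the original source): assuming
 * VLEN = 128 and vtype with VSEW = 2 (SEW = 32 bits) and LMUL = 1,
 * vext_get_vlmax() yields vlmax = 128 / 32 = 4, so
 * maxsz = vlmax << vsew = 16 bytes, and the GVEC fast path is eligible
 * once vstart == 0 and vl == vlmax.
 */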
void riscv_cpu_update_mask(CPURISCVState *env)
{
    target_ulong mask = 0, base = 0;
    RISCVMXL xl = env->xl;
    /*
     * TODO: Current RVJ spec does not specify
     * how the extension interacts with XLEN.
     */
#ifndef CONFIG_USER_ONLY
    int mode = cpu_address_mode(env);
    xl = cpu_get_xl(env, mode);
    if (riscv_has_ext(env, RVJ)) {
        switch (mode) {
        case PRV_M:
            if (env->mmte & M_PM_ENABLE) {
                mask = env->mpmmask;
                base = env->mpmbase;
            }
            break;
        case PRV_S:
            if (env->mmte & S_PM_ENABLE) {
                mask = env->spmmask;
                base = env->spmbase;
            }
            break;
        case PRV_U:
            if (env->mmte & U_PM_ENABLE) {
                mask = env->upmmask;
                base = env->upmbase;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
#endif
    if (xl == MXL_RV32) {
        env->cur_pmmask = mask & UINT32_MAX;
        env->cur_pmbase = base & UINT32_MAX;
    } else {
        env->cur_pmmask = mask;
        env->cur_pmbase = base;
    }
}
#ifndef CONFIG_USER_ONLY

/*
 * The HS-mode is allowed to configure priority only for the
 * following VS-mode local interrupts:
 *
 * 0  (Reserved interrupt, reads as zero)
 * 1  Supervisor software interrupt
 * 4  (Reserved interrupt, reads as zero)
 * 5  Supervisor timer interrupt
 * 8  (Reserved interrupt, reads as zero)
 * 13 (Reserved interrupt)
 * 14 "
 * 15 "
 * 16 "
 * 17 "
 * 18 "
 * 19 "
 * 20 "
 * 21 "
 * 22 "
 * 23 "
 */
static const int hviprio_index2irq[] = {
    0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
static const int hviprio_index2rdzero[] = {
    1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
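/*
 * Illustrative mapping (not in the original source): hviprio index 2
 * corresponds to irq 4 with rdzero = 1, i.e. a reserved interrupt whose
 * priority field reads as zero, while index 3 maps to irq 5 (the
 * supervisor timer interrupt) with a writable priority field.
 */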
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
{
    if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
        return -EINVAL;
    }

    if (out_irq) {
        *out_irq = hviprio_index2irq[index];
    }

    if (out_rdzero) {
        *out_rdzero = hviprio_index2rdzero[index];
    }

    return 0;
}
/*
 * Default priorities of local interrupts are defined in the
 * RISC-V Advanced Interrupt Architecture specification.
 *
 * ----------------------------------------------------------------
 *  Default  |
 *  Priority | Major Interrupt Numbers
 * ----------------------------------------------------------------
 *  Highest  | 47, 23, 46, 45, 22, 44,
 *           | 43, 21, 42, 41, 20, 40
 *           |
 *           | 11 (0b), 3 (03), 7 (07)
 *           | 9 (09), 1 (01), 5 (05)
 *           | 12 (0c)
 *           | 10 (0a), 2 (02), 6 (06)
 *           |
 *           | 39, 19, 38, 37, 18, 36,
 *  Lowest   | 35, 17, 34, 33, 16, 32
 * ----------------------------------------------------------------
 */
static const uint8_t default_iprio[64] = {
    /* Custom interrupts 48 to 63 */
    [63] = IPRIO_MMAXIPRIO,
    [62] = IPRIO_MMAXIPRIO,
    [61] = IPRIO_MMAXIPRIO,
    [60] = IPRIO_MMAXIPRIO,
    [59] = IPRIO_MMAXIPRIO,
    [58] = IPRIO_MMAXIPRIO,
    [57] = IPRIO_MMAXIPRIO,
    [56] = IPRIO_MMAXIPRIO,
    [55] = IPRIO_MMAXIPRIO,
    [54] = IPRIO_MMAXIPRIO,
    [53] = IPRIO_MMAXIPRIO,
    [52] = IPRIO_MMAXIPRIO,
    [51] = IPRIO_MMAXIPRIO,
    [50] = IPRIO_MMAXIPRIO,
    [49] = IPRIO_MMAXIPRIO,
    [48] = IPRIO_MMAXIPRIO,

    /* Custom interrupts 24 to 31 */
    [31] = IPRIO_MMAXIPRIO,
    [30] = IPRIO_MMAXIPRIO,
    [29] = IPRIO_MMAXIPRIO,
    [28] = IPRIO_MMAXIPRIO,
    [27] = IPRIO_MMAXIPRIO,
    [26] = IPRIO_MMAXIPRIO,
    [25] = IPRIO_MMAXIPRIO,
    [24] = IPRIO_MMAXIPRIO,

    [47] = IPRIO_DEFAULT_UPPER,
    [23] = IPRIO_DEFAULT_UPPER + 1,
    [46] = IPRIO_DEFAULT_UPPER + 2,
    [45] = IPRIO_DEFAULT_UPPER + 3,
    [22] = IPRIO_DEFAULT_UPPER + 4,
    [44] = IPRIO_DEFAULT_UPPER + 5,

    [43] = IPRIO_DEFAULT_UPPER + 6,
    [21] = IPRIO_DEFAULT_UPPER + 7,
    [42] = IPRIO_DEFAULT_UPPER + 8,
    [41] = IPRIO_DEFAULT_UPPER + 9,
    [20] = IPRIO_DEFAULT_UPPER + 10,
    [40] = IPRIO_DEFAULT_UPPER + 11,

    [11] = IPRIO_DEFAULT_M,
    [3]  = IPRIO_DEFAULT_M + 1,
    [7]  = IPRIO_DEFAULT_M + 2,

    [9]  = IPRIO_DEFAULT_S,
    [1]  = IPRIO_DEFAULT_S + 1,
    [5]  = IPRIO_DEFAULT_S + 2,

    [12] = IPRIO_DEFAULT_SGEXT,

    [10] = IPRIO_DEFAULT_VS,
    [2]  = IPRIO_DEFAULT_VS + 1,
    [6]  = IPRIO_DEFAULT_VS + 2,

    [39] = IPRIO_DEFAULT_LOWER,
    [19] = IPRIO_DEFAULT_LOWER + 1,
    [38] = IPRIO_DEFAULT_LOWER + 2,
    [37] = IPRIO_DEFAULT_LOWER + 3,
    [18] = IPRIO_DEFAULT_LOWER + 4,
    [36] = IPRIO_DEFAULT_LOWER + 5,

    [35] = IPRIO_DEFAULT_LOWER + 6,
    [17] = IPRIO_DEFAULT_LOWER + 7,
    [34] = IPRIO_DEFAULT_LOWER + 8,
    [33] = IPRIO_DEFAULT_LOWER + 9,
    [16] = IPRIO_DEFAULT_LOWER + 10,
    [32] = IPRIO_DEFAULT_LOWER + 11,
};
uint8_t riscv_cpu_default_priority(int irq)
{
    if (irq < 0 || irq > 63) {
        return IPRIO_MMAXIPRIO;
    }

    return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
}
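/*
 * Worked example (illustrative, not in the original source): irq 3
 * (machine software interrupt) has default_iprio[3] == IPRIO_DEFAULT_M + 1,
 * so it ranks just below irq 11 (machine external interrupt) and above
 * irq 7 (machine timer interrupt); any irq absent from the table falls
 * back to IPRIO_MMAXIPRIO, the lowest default priority.
 */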
static int riscv_cpu_pending_to_irq(CPURISCVState *env,
                                    int extirq, unsigned int extirq_def_prio,
                                    uint64_t pending, uint8_t *iprio)
{
    int irq, best_irq = RISCV_EXCP_NONE;
    unsigned int prio, best_prio = UINT_MAX;

    if (!pending) {
        return RISCV_EXCP_NONE;
    }

    irq = ctz64(pending);
    if (!((extirq == IRQ_M_EXT) ? riscv_cpu_cfg(env)->ext_smaia :
                                  riscv_cpu_cfg(env)->ext_ssaia)) {
        return irq;
    }

    pending = pending >> irq;
    while (pending) {
        prio = iprio[irq];
        if (!prio) {
            if (irq == extirq) {
                prio = extirq_def_prio;
            } else {
                prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
                       1 : IPRIO_MMAXIPRIO;
            }
        }
        if ((pending & 0x1) && (prio <= best_prio)) {
            best_irq = irq;
            best_prio = prio;
        }
        irq++;
        pending = pending >> 1;
    }

    return best_irq;
}
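/*
 * Illustrative walk (not in the original source): with AIA enabled and
 * both irq 1 (SSIP) and irq 9 (SEIP) pending, the loop above compares
 * iprio[1] and iprio[9]; if both priority bytes are zero, the AIA
 * default order applies and irq 9 (IPRIO_DEFAULT_S) wins over irq 1
 * (IPRIO_DEFAULT_S + 1), since a lower value means a higher priority.
 */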
/*
 * Doesn't report interrupts inserted using mvip from M-mode firmware. Those
 * are returned in riscv_cpu_sirq_pending().
 */
uint64_t riscv_cpu_all_pending(CPURISCVState *env)
{
    uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
    uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
    uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;

    return (env->mip | vsgein | vstip) & env->mie;
}
int riscv_cpu_mirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
                    ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                    irqs, env->miprio);
}
int riscv_cpu_sirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                    ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
    uint64_t irqs_f = env->mvip & env->mvien & ~env->mideleg & env->sie;

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    irqs | irqs_f, env->siprio);
}
int riscv_cpu_vsirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                    (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    irqs >> 1, env->hviprio);
}
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    int virq;
    uint64_t irqs, pending, mie, hsie, vsie, irqs_f;

    /* Determine interrupt enable state of all privilege modes */
    if (env->virt_enabled) {
        mie = 1;
        hsie = 1;
        vsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
    } else {
        mie = (env->priv < PRV_M) ||
              (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
        hsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
        vsie = 0;
    }

    /* Determine all pending interrupts */
    pending = riscv_cpu_all_pending(env);

    /* Check M-mode interrupts */
    irqs = pending & ~env->mideleg & -mie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                        irqs, env->miprio);
    }

    /* Check for virtual S-mode interrupts. */
    irqs_f = env->mvip & (env->mvien & ~env->mideleg) & env->sie;

    /* Check HS-mode interrupts */
    irqs = ((pending & env->mideleg & ~env->hideleg) | irqs_f) & -hsie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs, env->siprio);
    }

    /* Check VS-mode interrupts */
    irqs = pending & env->mideleg & env->hideleg & -vsie;
    if (irqs) {
        virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs >> 1, env->hviprio);
        return (virq <= 0) ? virq : virq + 1;
    }

    /* Indicate no pending interrupt */
    return RISCV_EXCP_NONE;
}
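/*
 * Note on the masking idiom above (illustrative, not in the original
 * source): mie, hsie and vsie are each 0 or 1, so negating them yields
 * either all-zeroes or all-ones. "pending & ~env->mideleg & -mie"
 * therefore keeps the non-delegated pending bits only when M-mode
 * interrupts are globally enabled, without needing a branch.
 */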
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}
/* Return true if vector support is currently enabled */
bool riscv_cpu_vector_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_VS) {
        if (env->virt_enabled && !(env->mstatus_hs & MSTATUS_VS)) {
            return false;
        }
        return true;
    }

    return false;
}
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL | MSTATUS_VS;

    if (riscv_has_ext(env, RVF)) {
        mstatus_mask |= MSTATUS_FS;
    }
    bool current_virt = env->virt_enabled;

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}
target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return 0;
    }

    return env->geilen;
}

void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    if (geilen > (TARGET_LONG_BITS - 1)) {
        return;
    }

    env->geilen = geilen;
}
/* This function can only be called to set virt when RVH is enabled */
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    /* Flush the TLB on all virt mode changes. */
    if (env->virt_enabled != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt_enabled = enable;

    if (enable) {
        /*
         * The guest external interrupts from an interrupt controller are
         * delivered only when the Guest/VM is running (i.e. V=1). This means
         * any guest external interrupt which is triggered while the Guest/VM
         * is not running (i.e. V=0) will be missed on QEMU resulting in guest
         * with sluggish response to serial console input and other I/O events.
         *
         * To solve this, we check and inject interrupt after setting V=1.
         */
        riscv_cpu_update_mip(env, 0, 0);
    }
}
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}
void riscv_cpu_interrupt(CPURISCVState *env)
{
    uint64_t gein, vsgein = 0, vstip = 0, irqf = 0;
    CPUState *cs = env_cpu(env);

    QEMU_IOTHREAD_LOCK_GUARD();

    if (env->virt_enabled) {
        gein = get_field(env->hstatus, HSTATUS_VGEIN);
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
    } else {
        irqf = env->mvien & env->mvip & env->sie;
    }

    vstip = env->vstime_irq ? MIP_VSTIP : 0;

    if (env->mip | vsgein | vstip | irqf) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }
}
uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask,
                              uint64_t value)
{
    uint64_t old = env->mip;

    /* No need to update mip for VSTIP */
    mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask;

    QEMU_IOTHREAD_LOCK_GUARD();

    env->mip = (env->mip & ~mask) | (value & mask);

    riscv_cpu_interrupt(env);

    return old;
}
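/*
 * Usage sketch (illustrative, not in the original source): a timer
 * device model raises the machine timer interrupt with
 * riscv_cpu_update_mip(env, MIP_MTIP, BOOL_TO_MASK(1)) and clears it
 * with riscv_cpu_update_mip(env, MIP_MTIP, 0); the returned value is
 * the previous mip, letting callers detect edges.
 */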
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}

void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg)
{
    if (priv <= PRV_M) {
        env->aia_ireg_rmw_fn[priv] = rmw_fn;
        env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
    }
}
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);

    if (icount_enabled() && newpriv != env->priv) {
        riscv_itrigger_update_priv(env);
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}
/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and its
 * TLB page. Returns 0 if the permission checking was successful
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @addr: The physical address to be checked permission
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    bool pmp_has_privs;

    if (!riscv_cpu_cfg(env)->pmp) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    pmp_has_privs = pmp_hart_has_privs(env, addr, size, 1 << access_type,
                                       &pmp_priv, mode);
    if (!pmp_has_privs) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);

    return TRANSLATE_SUCCESS;
}
/*
 * get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address or guest physical address to be translated
 * @fault_pte_addr: If not NULL, this will be set to fault pte address
 *                  when a error occurs on pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *ret_prot, vaddr addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage,
                                bool is_debug)
{
    /*
     * NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct
     */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmuidx_priv(mmu_idx);
    bool use_background = false;
    hwaddr ppn;
    int napot_bits = 0;
    target_ulong napot_mask;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!env->virt_enabled && two_stage) {
        use_background = true;
    }

    if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) {
        *physical = addr;
        *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *ret_prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, widened;

    if (first_stage == true) {
        if (use_background) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP64_MODE);
            }
        } else {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP64_MODE);
            }
        }
        widened = 0;
    } else {
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP32_MODE);
        } else {
            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP64_MODE);
        }
        widened = 2;
    }

    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *ret_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;

    if (first_stage == true) {
        target_ulong mask, masked_msbs;

        if (TARGET_LONG_BITS > (va_bits - 1)) {
            mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
        } else {
            mask = 0;
        }
        masked_msbs = (addr >> (va_bits - 1)) & mask;

        if (masked_msbs != 0 && masked_msbs != mask) {
            return TRANSLATE_FAIL;
        }
    } else {
        if (vm != VM_1_10_SV32 && addr >> va_bits != 0) {
            return TRANSLATE_FAIL;
        }
    }

    bool pbmte = env->menvcfg & MENVCFG_PBMTE;
    bool adue = env->menvcfg & MENVCFG_ADUE;

    if (first_stage && two_stage && env->virt_enabled) {
        pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE);
        adue = adue && (env->henvcfg & HENVCFG_ADUE);
    }

    int ptshift = (levels - 1) * ptidxbits;
    target_ulong pte;
    hwaddr pte_addr;
    int i;

#if !TCG_OVERSIZED_GUEST
 restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                  ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                  ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 MMUIdx_U, false, true,
                                                 is_debug);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
                                               sizeof(target_ulong),
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

        if (riscv_cpu_mxl(env) == MXL_RV32) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

        if (riscv_cpu_sxl(env) == MXL_RV32) {
            ppn = pte >> PTE_PPN_SHIFT;
        } else {
            if (pte & PTE_RESERVED) {
                return TRANSLATE_FAIL;
            }

            if (!pbmte && (pte & PTE_PBMT)) {
                return TRANSLATE_FAIL;
            }

            if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
                return TRANSLATE_FAIL;
            }

            ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
        }

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        }
        if (pte & (PTE_R | PTE_W | PTE_X)) {
            goto leaf;
        }

        /* Inner PTE, continue walking */
        if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
            return TRANSLATE_FAIL;
        }
        base = ppn << PGSHIFT;
    }

    /* No leaf pte at any translation level. */
    return TRANSLATE_FAIL;

 leaf:
    if (ppn & ((1ULL << ptshift) - 1)) {
        /* Misaligned PPN */
        return TRANSLATE_FAIL;
    }
    if (!pbmte && (pte & PTE_PBMT)) {
        /* Reserved without Svpbmt. */
        return TRANSLATE_FAIL;
    }

    /* Check for reserved combinations of RWX flags. */
    switch (pte & (PTE_R | PTE_W | PTE_X)) {
    case PTE_W:
    case PTE_W | PTE_X:
        return TRANSLATE_FAIL;
    }

    int prot = 0;
    if (pte & PTE_R) {
        prot |= PAGE_READ;
    }
    if (pte & PTE_W) {
        prot |= PAGE_WRITE;
    }
    if (pte & PTE_X) {
        bool mxr;

        if (first_stage == true) {
            mxr = get_field(env->mstatus, MSTATUS_MXR);
        } else {
            mxr = get_field(env->vsstatus, MSTATUS_MXR);
        }
        if (mxr) {
            prot |= PAGE_READ;
        }
        prot |= PAGE_EXEC;
    }

    if (pte & PTE_U) {
        if (mode != PRV_U) {
            if (!mmuidx_sum(mmu_idx)) {
                return TRANSLATE_FAIL;
            }
            /* SUM allows only read+write, not execute. */
            prot &= PAGE_READ | PAGE_WRITE;
        }
    } else if (mode != PRV_S) {
        /* Supervisor PTE flags when not S mode */
        return TRANSLATE_FAIL;
    }

    if (!((prot >> access_type) & 1)) {
        /* Access check failed */
        return TRANSLATE_FAIL;
    }

    /* If necessary, set accessed and dirty bits. */
    target_ulong updated_pte = pte | PTE_A |
                               (access_type == MMU_DATA_STORE ? PTE_D : 0);

    /* Page table updates need to be atomic with MTTCG enabled */
    if (updated_pte != pte && !is_debug) {
        if (!adue) {
            return TRANSLATE_FAIL;
        }

        /*
         * - if accessed or dirty bits need updating, and the PTE is
         *   in RAM, then we do so atomically with a compare and swap.
         * - if the PTE is in IO space or ROM, then it can't be updated
         *   and we return TRANSLATE_FAIL.
         * - if the PTE changed by the time we went to update it, then
         *   it is no longer valid and we must re-walk the page table.
         */
        MemoryRegion *mr;
        hwaddr l = sizeof(target_ulong), addr1;
        mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
                                     false, MEMTXATTRS_UNSPECIFIED);
        if (memory_region_is_ram(mr)) {
            target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
            /*
             * MTTCG is not enabled on oversized TCG guests so
             * page table updates do not need to be atomic
             */
            *pte_pa = pte = updated_pte;
#else
            target_ulong old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
            if (old_pte != pte) {
                goto restart;
            }
            pte = updated_pte;
#endif
        } else {
            /*
             * Misconfigured PTE in ROM (AD bits are not preset) or
             * PTE is in IO space and can't be updated atomically.
             */
            return TRANSLATE_FAIL;
        }
    }

    /* For superpage mappings, make a fake leaf PTE for the TLB's benefit. */
    target_ulong vpn = addr >> PGSHIFT;

    if (riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
        napot_bits = ctzl(ppn) + 1;
        if ((i != (levels - 1)) || (napot_bits != 4)) {
            return TRANSLATE_FAIL;
        }
    }

    napot_mask = (1 << napot_bits) - 1;
    *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
                  (vpn & (((target_ulong)1 << ptshift) - 1))
                 ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);

    /*
     * Remove write permission unless this is a store, or the page is
     * already dirty, so that we TLB miss on later writes to update
     * the dirty bit.
     */
    if (access_type != MMU_DATA_STORE && !(pte & PTE_D)) {
        prot &= ~PAGE_WRITE;
    }
    *ret_prot = prot;

    return TRANSLATE_SUCCESS;
}
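/*
 * Worked example (illustrative, not in the original source): for an
 * Sv39 first-stage walk, levels = 3 and ptidxbits = 9, so
 * va_bits = PGSHIFT + 3 * 9 = 39 and bits 63..38 of the virtual
 * address must all be copies of bit 38. With Svnapot, a leaf PTE with
 * PTE_N set and a ppn ending in 0b1000 yields napot_bits = 4, i.e. a
 * naturally aligned group of 16 pages (64 KiB) sharing one PTE shape.
 */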
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage,
                                bool two_stage_indirect)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions, vm;
    uint64_t stap_mode;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        stap_mode = SATP32_MODE;
    } else {
        stap_mode = SATP64_MODE;
    }

    if (first_stage) {
        vm = get_field(env->satp, stap_mode);
    } else {
        vm = get_field(env->hgatp, stap_mode);
    }

    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;

    switch (access_type) {
    case MMU_INST_FETCH:
        if (env->virt_enabled && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT :
                RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    env->two_stage_lookup = two_stage;
    env->two_stage_indirect_lookup = two_stage_indirect;
}
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, env->virt_enabled, true)) {
        return -1;
    }

    if (env->virt_enabled) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else if (access_type == MMU_DATA_LOAD) {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = mmuidx_2stage(mmu_idx);
    env->two_stage_indirect_lookup = false;
    cpu_loop_exit_restore(cs, retaddr);
}
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = mmuidx_2stage(mmu_idx);
    env->two_stage_indirect_lookup = false;
    cpu_loop_exit_restore(cs, retaddr);
}
static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
{
    enum riscv_pmu_event_idx pmu_event_type;

    switch (access_type) {
    case MMU_INST_FETCH:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
        break;
    case MMU_DATA_LOAD:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
        break;
    case MMU_DATA_STORE:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
        break;
    default:
        return;
    }

    riscv_pmu_incr_ctr(cpu, pmu_event_type);
}
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = mmuidx_2stage(mmu_idx);
    bool two_stage_indirect_error = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmuidx_priv(mmu_idx);
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    pmu_tlb_fill_incr_ctr(cpu, access_type);
    if (two_stage_lookup) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false);

        /*
         * A G-stage exception may be triggered during two state lookup.
         * And the env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            two_stage_indirect_error = true;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      HWADDR_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, MMUIdx_U, false, true,
                                       false);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
                          HWADDR_FMT_plx " prot %d\n",
                          __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, pa,
                                               size, access_type, mode);
                tlb_size = pmp_get_tlb_size(env, pa);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }

            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      HWADDR_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, pa,
                                           size, access_type, mode);
            tlb_size = pmp_get_tlb_size(env, pa);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error, two_stage_lookup,
                            two_stage_indirect_error);
        cpu_loop_exit_restore(cs, retaddr);
    }

    return true;
}
static target_ulong riscv_transformed_insn(CPURISCVState *env,
                                           target_ulong insn,
                                           target_ulong taddr)
{
    target_ulong xinsn = 0;
    target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;

    /*
     * Only Quadrant 0 and Quadrant 2 of RVC instruction space need to
     * be uncompressed. The Quadrant 1 of RVC instruction space need
     * not be transformed because these instructions won't generate
     * any load/store trap.
     */
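    /*
     * Worked example (illustrative, not in the original source): a
     * faulting c.lw a4, 8(a3) is expanded below into the 32-bit LW
     * encoding with rd taken from the compressed rs2' field and the
     * immediate zeroed; bit 1 is then cleared to flag a 16-bit origin,
     * and any misalignment is reported via the "Addr. Offset" (rs1)
     * field computed at the end of this function.
     */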
    if ((insn & 0x3) != 0x3) {
        /* Transform 16bit instruction into 32bit instruction */
        switch (GET_C_OP(insn)) {
        case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */
            switch (GET_C_FUNC(insn)) {
            case OPC_RISC_C_FUNC_FLD_LQ:
                if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */
                    xinsn = OPC_RISC_FLD;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_LW: /* C.LW */
                xinsn = OPC_RISC_LW;
                xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                access_rs1 = GET_C_RS1S(insn);
                access_imm = GET_C_LW_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FLW_LD:
                if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */
                    xinsn = OPC_RISC_FLW;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LW_IMM(insn);
                    access_size = 4;
                } else { /* C.LD (RV64/RV128) */
                    xinsn = OPC_RISC_LD;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_FSD_SQ:
                if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */
                    xinsn = OPC_RISC_FSD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_SW: /* C.SW */
                xinsn = OPC_RISC_SW;
                xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                access_rs1 = GET_C_RS1S(insn);
                access_imm = GET_C_SW_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FSW_SD:
                if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */
                    xinsn = OPC_RISC_FSW;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SW_IMM(insn);
                    access_size = 4;
                } else { /* C.SD (RV64/RV128) */
                    xinsn = OPC_RISC_SD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SD_IMM(insn);
                    access_size = 8;
                }
                break;
            default:
                break;
            }
            break;
        case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */
            switch (GET_C_FUNC(insn)) {
            case OPC_RISC_C_FUNC_FLDSP_LQSP:
                if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */
                    xinsn = OPC_RISC_FLD;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */
                xinsn = OPC_RISC_LW;
                xinsn = SET_RD(xinsn, GET_C_RD(insn));
                access_rs1 = 2;
                access_imm = GET_C_LWSP_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FLWSP_LDSP:
                if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */
                    xinsn = OPC_RISC_FLW;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LWSP_IMM(insn);
                    access_size = 4;
                } else { /* C.LDSP (RV64/RV128) */
                    xinsn = OPC_RISC_LD;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_FSDSP_SQSP:
                if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */
                    xinsn = OPC_RISC_FSD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */
                xinsn = OPC_RISC_SW;
                xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                access_rs1 = 2;
                access_imm = GET_C_SWSP_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FSWSP_SDSP:
                if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */
                    xinsn = OPC_RISC_FSW;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SWSP_IMM(insn);
                    access_size = 4;
                } else { /* C.SDSP (RV64/RV128) */
                    xinsn = OPC_RISC_SD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /*
         * Clear Bit1 of transformed instruction to indicate that
         * original instruction was a 16bit instruction
         */
        xinsn &= ~((target_ulong)0x2);
    } else {
        /* Transform 32bit (or wider) instructions */
        switch (MASK_OP_MAJOR(insn)) {
        case OPC_RISC_ATOMIC:
            xinsn = insn;
            access_rs1 = GET_RS1(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_LOAD:
        case OPC_RISC_FP_LOAD:
            xinsn = SET_I_IMM(insn, 0);
            access_rs1 = GET_RS1(insn);
            access_imm = GET_IMM(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_STORE:
        case OPC_RISC_FP_STORE:
            xinsn = SET_S_IMM(insn, 0);
            access_rs1 = GET_RS1(insn);
            access_imm = GET_STORE_IMM(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_SYSTEM:
            if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
                xinsn = insn;
                access_rs1 = GET_RS1(insn);
                access_size = 1 << ((GET_FUNCT7(insn) >> 1) & 0x3);
                access_size = 1 << access_size;
            }
            break;
        default:
            break;
        }
    }

    if (access_size) {
        xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] + access_imm)) &
                               (access_size - 1));
    }

    return xinsn;
}
#endif /* !CONFIG_USER_ONLY */
/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool write_gva = false;
    uint64_t s;

    /*
     * cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
     * so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    uint64_t deleg = async ? env->mideleg : env->medeleg;
    bool s_injected = env->mvip & (1 << cause) & env->mvien &&
                      !(env->mip & (1 << cause));
    target_ulong tval = 0;
    target_ulong tinst = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_SEMIHOST:
            do_common_semihosting(cs);
            env->pc += 4;
            return;
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /*
                 * special pseudoinstruction for G-stage fault taken while
                 * doing VS-stage page table walk.
                 */
                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
            } else {
                /*
                 * The "Addr. Offset" field in transformed instruction is
                 * non-zero only for misaligned access.
                 */
                tinst = riscv_transformed_insn(env, env->bins, tval);
            }
            break;
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /*
                 * special pseudoinstruction for G-stage fault taken while
                 * doing VS-stage page table walk.
                 */
                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
            }
            break;
        case RISCV_EXCP_ILLEGAL_INST:
        case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
            tval = env->bins;
            break;
        case RISCV_EXCP_BREAKPOINT:
            if (cs->watchpoint_hit) {
                tval = cs->watchpoint_hit->hitaddr;
                cs->watchpoint_hit = NULL;
            }
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && env->virt_enabled) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !env->virt_enabled) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));

    if (env->priv <= PRV_S && cause < 64 &&
        (((deleg >> cause) & 1) || s_injected)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            uint64_t hdeleg = async ? env->hideleg : env->hedeleg;

            if (env->virt_enabled && ((hdeleg >> cause) & 1)) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust cause. Yes if its VS mode interrupt
                 * no if hypervisor has delegated one of hs mode's interrupt
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                write_gva = false;
            } else if (env->virt_enabled) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, true);

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
            }
            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
        env->htinst = tinst;
        env->pc = (env->stvec >> 2 << 2) +
                  ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
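        /*
         * Illustrative example (not in the original source): with
         * stvec = 0x80000001 (vectored mode, MODE bits == 1) and an
         * asynchronous cause of 5 (supervisor timer interrupt), the pc
         * computation above yields 0x80000000 + 5 * 4 = 0x80000014;
         * synchronous traps always enter at the base address.
         */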
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (env->virt_enabled) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     env->virt_enabled);
            if (env->virt_enabled && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mtval = tval;
        env->mtval2 = mtval2;
        env->mtinst = tinst;
        env->pc = (env->mtvec >> 2 << 2) +
                  ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /*
     * NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
    env->two_stage_indirect_lookup = false;
#endif
    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
}