/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "pmu.h"
#include "exec/exec-all.h"
#include "instmap.h"
#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"
#include "sysemu/cpu-timers.h"
#include "cpu_bits.h"
#include "debug.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    uint32_t flags = 0;

    *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
    *cs_base = 0;

    if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
        /*
         * If env->vl equals VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we set the vl_eq_vlmax flag
         * to true only when maxsz >= 8 bytes.
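         *
         * For example (illustrative numbers, not from the original code):
         * with VLEN = 128, SEW = 8 and LMUL = 1/8, vlmax is 2 elements,
         * so maxsz = 2 bytes and GVEC cannot be used; with LMUL = 1 the
         * same configuration gives vlmax = 16 and maxsz = 16 bytes.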
         */
        uint32_t vlmax = vext_get_vlmax(cpu, env->vtype);
        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t maxsz = vlmax << sew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);

        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
        flags = FIELD_DP32(flags, TB_FLAGS, VTA,
                           FIELD_EX64(env->vtype, VTYPE, VTA));
        flags = FIELD_DP32(flags, TB_FLAGS, VMA,
                           FIELD_EX64(env->vtype, VTYPE, VMA));
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }
#ifdef CONFIG_USER_ONLY
    flags |= TB_FLAGS_MSTATUS_FS;
    flags |= TB_FLAGS_MSTATUS_VS;
#else
    flags |= cpu_mmu_index(env, 0);
    if (riscv_cpu_fp_enabled(env)) {
        flags |= env->mstatus & MSTATUS_FS;
    }

    if (riscv_cpu_vector_enabled(env)) {
        flags |= env->mstatus & MSTATUS_VS;
    }

    if (riscv_has_ext(env, RVH)) {
        if (env->priv == PRV_M ||
            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
                get_field(env->hstatus, HSTATUS_HU))) {
            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
        }

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
                           get_field(env->mstatus_hs, MSTATUS_FS));

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
                           get_field(env->mstatus_hs, MSTATUS_VS));
    }
    if (cpu->cfg.debug && !icount_enabled()) {
        flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
    if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
    }
    if (env->cur_pmbase != 0) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
    }

    *pflags = flags;
}
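
/*
 * Recompute the effective pointer-masking mask and base for the current
 * privilege mode. Caching them in env->cur_pmmask/env->cur_pmbase lets
 * the translation-time checks (the PM_* TB flags above) stay cheap.
 */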
void riscv_cpu_update_mask(CPURISCVState *env)
{
    target_ulong mask = -1, base = 0;
    /*
     * TODO: Current RVJ spec does not specify
     * how the extension interacts with XLEN.
     */
#ifndef CONFIG_USER_ONLY
    if (riscv_has_ext(env, RVJ)) {
        switch (env->priv) {
        case PRV_M:
            if (env->mmte & M_PM_ENABLE) {
                mask = env->mpmmask;
                base = env->mpmbase;
            }
            break;
        case PRV_S:
            if (env->mmte & S_PM_ENABLE) {
                mask = env->spmmask;
                base = env->spmbase;
            }
            break;
        case PRV_U:
            if (env->mmte & U_PM_ENABLE) {
                mask = env->upmmask;
                base = env->upmbase;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
#endif
    if (env->xl == MXL_RV32) {
        env->cur_pmmask = mask & UINT32_MAX;
        env->cur_pmbase = base & UINT32_MAX;
    } else {
        env->cur_pmmask = mask;
        env->cur_pmbase = base;
    }
}

#ifndef CONFIG_USER_ONLY

/*
 * The HS-mode is allowed to configure priority only for the
 * following VS-mode local interrupts:
 *
 * 0  (Reserved interrupt, reads as zero)
 * 1  Supervisor software interrupt
 * 4  (Reserved interrupt, reads as zero)
 * 5  Supervisor timer interrupt
 * 8  (Reserved interrupt, reads as zero)
 * 13 (Reserved interrupt)
 * 14 "
 * 15 "
 * 16 "
 * 17 "
 * 18 "
 * 19 "
 * 20 "
 * 21 "
 * 22 "
 * 23 "
 */

static const int hviprio_index2irq[] = {
    0, 1, 4, 5, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
static const int hviprio_index2rdzero[] = {
    1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
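
/*
 * Translate an hviprio array index into its IRQ number and whether the
 * corresponding priority reads as zero (the reserved slots listed above).
 */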
int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
{
    if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
        return -EINVAL;
    }

    if (out_irq) {
        *out_irq = hviprio_index2irq[index];
    }

    if (out_rdzero) {
        *out_rdzero = hviprio_index2rdzero[index];
    }

    return 0;
}

/*
 * Default priorities of local interrupts are defined in the
 * RISC-V Advanced Interrupt Architecture specification.
 *
 * ----------------------------------------------------------------
 *  Priority | Major Interrupt Numbers
 * ----------------------------------------------------------------
 *  Highest  | 47, 23, 46, 45, 22, 44,
 *           | 43, 21, 42, 41, 20, 40
 *           |
 *           | 11 (0b), 3 (03), 7 (07)
 *           | 9 (09), 1 (01), 5 (05)
 *           | 12 (0c)
 *           | 10 (0a), 2 (02), 6 (06)
 *           |
 *           | 39, 19, 38, 37, 18, 36,
 *  Lowest   | 35, 17, 34, 33, 16, 32
 * ----------------------------------------------------------------
 */
static const uint8_t default_iprio[64] = {
    /* Custom interrupts 48 to 63 */
    [63] = IPRIO_MMAXIPRIO,
    [62] = IPRIO_MMAXIPRIO,
    [61] = IPRIO_MMAXIPRIO,
    [60] = IPRIO_MMAXIPRIO,
    [59] = IPRIO_MMAXIPRIO,
    [58] = IPRIO_MMAXIPRIO,
    [57] = IPRIO_MMAXIPRIO,
    [56] = IPRIO_MMAXIPRIO,
    [55] = IPRIO_MMAXIPRIO,
    [54] = IPRIO_MMAXIPRIO,
    [53] = IPRIO_MMAXIPRIO,
    [52] = IPRIO_MMAXIPRIO,
    [51] = IPRIO_MMAXIPRIO,
    [50] = IPRIO_MMAXIPRIO,
    [49] = IPRIO_MMAXIPRIO,
    [48] = IPRIO_MMAXIPRIO,

    /* Custom interrupts 24 to 31 */
    [31] = IPRIO_MMAXIPRIO,
    [30] = IPRIO_MMAXIPRIO,
    [29] = IPRIO_MMAXIPRIO,
    [28] = IPRIO_MMAXIPRIO,
    [27] = IPRIO_MMAXIPRIO,
    [26] = IPRIO_MMAXIPRIO,
    [25] = IPRIO_MMAXIPRIO,
    [24] = IPRIO_MMAXIPRIO,

    [47] = IPRIO_DEFAULT_UPPER,
    [23] = IPRIO_DEFAULT_UPPER + 1,
    [46] = IPRIO_DEFAULT_UPPER + 2,
    [45] = IPRIO_DEFAULT_UPPER + 3,
    [22] = IPRIO_DEFAULT_UPPER + 4,
    [44] = IPRIO_DEFAULT_UPPER + 5,

    [43] = IPRIO_DEFAULT_UPPER + 6,
    [21] = IPRIO_DEFAULT_UPPER + 7,
    [42] = IPRIO_DEFAULT_UPPER + 8,
    [41] = IPRIO_DEFAULT_UPPER + 9,
    [20] = IPRIO_DEFAULT_UPPER + 10,
    [40] = IPRIO_DEFAULT_UPPER + 11,

    [11] = IPRIO_DEFAULT_M,
    [3]  = IPRIO_DEFAULT_M + 1,
    [7]  = IPRIO_DEFAULT_M + 2,

    [9]  = IPRIO_DEFAULT_S,
    [1]  = IPRIO_DEFAULT_S + 1,
    [5]  = IPRIO_DEFAULT_S + 2,

    [12] = IPRIO_DEFAULT_SGEXT,

    [10] = IPRIO_DEFAULT_VS,
    [2]  = IPRIO_DEFAULT_VS + 1,
    [6]  = IPRIO_DEFAULT_VS + 2,

    [39] = IPRIO_DEFAULT_LOWER,
    [19] = IPRIO_DEFAULT_LOWER + 1,
    [38] = IPRIO_DEFAULT_LOWER + 2,
    [37] = IPRIO_DEFAULT_LOWER + 3,
    [18] = IPRIO_DEFAULT_LOWER + 4,
    [36] = IPRIO_DEFAULT_LOWER + 5,

    [35] = IPRIO_DEFAULT_LOWER + 6,
    [17] = IPRIO_DEFAULT_LOWER + 7,
    [34] = IPRIO_DEFAULT_LOWER + 8,
    [33] = IPRIO_DEFAULT_LOWER + 9,
    [16] = IPRIO_DEFAULT_LOWER + 10,
    [32] = IPRIO_DEFAULT_LOWER + 11,
};

uint8_t riscv_cpu_default_priority(int irq)
{
    if (irq < 0 || irq > 63) {
        return IPRIO_MMAXIPRIO;
    }

    return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
}
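
/*
 * Pick the best pending local interrupt. Without Smaia/Ssaia the lowest
 * pending interrupt number simply wins. With AIA, an iprio[] entry of 0
 * means "unconfigured", and the default priority table above decides how
 * such an interrupt ranks against the external interrupt.
 */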
static int riscv_cpu_pending_to_irq(CPURISCVState *env,
                                    int extirq, unsigned int extirq_def_prio,
                                    uint64_t pending, uint8_t *iprio)
{
    RISCVCPU *cpu = env_archcpu(env);
    int irq, best_irq = RISCV_EXCP_NONE;
    unsigned int prio, best_prio = UINT_MAX;

    if (!pending) {
        return RISCV_EXCP_NONE;
    }

    irq = ctz64(pending);
    if (!((extirq == IRQ_M_EXT) ? cpu->cfg.ext_smaia : cpu->cfg.ext_ssaia)) {
        return irq;
    }

    pending = pending >> irq;
    while (pending) {
        prio = iprio[irq];
        if (!prio) {
            if (irq == extirq) {
                prio = extirq_def_prio;
            } else {
                prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
                       1 : IPRIO_MMAXIPRIO;
            }
        }
        if ((pending & 0x1) && (prio <= best_prio)) {
            best_irq = irq;
            best_prio = prio;
        }
        irq++;
        pending = pending >> 1;
    }

    return best_irq;
}

uint64_t riscv_cpu_all_pending(CPURISCVState *env)
{
    uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
    uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
    uint64_t vstip = (env->vstime_irq) ? MIP_VSTIP : 0;

    return (env->mip | vsgein | vstip) & env->mie;
}

int riscv_cpu_mirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
                    ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                    irqs, env->miprio);
}

int riscv_cpu_sirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                    ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    irqs, env->siprio);
}

int riscv_cpu_vsirq_pending(CPURISCVState *env)
{
    uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
                    (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);

    return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                    irqs >> 1, env->hviprio);
}
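
/*
 * Note on the "irqs >> 1" above and below: VS-level interrupts are
 * presented to the guest as their S-level counterparts (VSEIP reads as
 * SEIP, and so on), so the pending bits are shifted down by one before
 * priority resolution; riscv_cpu_local_irq_pending() converts the chosen
 * cause back with virq + 1. Also note that mie/hsie/vsie below are used
 * as all-ones/all-zero masks via unary minus (e.g. pending & -mie).
 */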

static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    int virq;
    uint64_t irqs, pending, mie, hsie, vsie;

    /* Determine interrupt enable state of all privilege modes */
    if (riscv_cpu_virt_enabled(env)) {
        mie = 1;
        hsie = 1;
        vsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
    } else {
        mie = (env->priv < PRV_M) ||
              (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
        hsie = (env->priv < PRV_S) ||
               (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
        vsie = 0;
    }

    /* Determine all pending interrupts */
    pending = riscv_cpu_all_pending(env);

    /* Check M-mode interrupts */
    irqs = pending & ~env->mideleg & -mie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
                                        irqs, env->miprio);
    }

    /* Check HS-mode interrupts */
    irqs = pending & env->mideleg & ~env->hideleg & -hsie;
    if (irqs) {
        return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs, env->siprio);
    }

    /* Check VS-mode interrupts */
    irqs = pending & env->mideleg & env->hideleg & -vsie;
    if (irqs) {
        virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
                                        irqs >> 1, env->hviprio);
        return (virq <= 0) ? virq : virq + 1;
    }

    /* Indicate no pending interrupt */
    return RISCV_EXCP_NONE;
}

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

/* Return true if vector support is currently enabled */
bool riscv_cpu_vector_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_VS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) {
            return false;
        }
        return true;
    }

    return false;
}

void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL | MSTATUS_VS;

    if (riscv_has_ext(env, RVF)) {
        mstatus_mask |= MSTATUS_FS;
    }
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}

target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return 0;
    }

    return env->geilen;
}

void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    if (geilen > (TARGET_LONG_BITS - 1)) {
        return;
    }

    env->geilen = geilen;
}

bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, VIRT_ONOFF);
}

void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    /* Flush the TLB on all virt mode changes. */
    if (get_field(env->virt, VIRT_ONOFF) != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt = set_field(env->virt, VIRT_ONOFF, enable);

    if (enable) {
        /*
         * The guest external interrupts from an interrupt controller are
         * delivered only when the Guest/VM is running (i.e. V=1). This means
         * any guest external interrupt which is triggered while the Guest/VM
         * is not running (i.e. V=0) will be missed by QEMU, resulting in a
         * guest with sluggish response to serial console input and other
         * I/O events.
         *
         * To solve this, we check and inject interrupt after setting V=1.
         */
        riscv_cpu_update_mip(env_archcpu(env), 0, 0);
    }
}

bool riscv_cpu_two_stage_lookup(int mmu_idx)
{
    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}
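
/*
 * Update the mip pending bits under the given mask. The VS external and
 * timer interrupts (vsgein/vstip) are ORed in rather than stored, since
 * they are driven externally (by hgeip and the vstimecmp logic); the
 * previous mip value is returned so callers can detect edges.
 */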
uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint64_t gein, vsgein = 0, vstip = 0, old = env->mip;

    if (riscv_cpu_virt_enabled(env)) {
        gein = get_field(env->hstatus, HSTATUS_VGEIN);
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
    }

    vstip = env->vstime_irq ? MIP_VSTIP : 0;

    QEMU_IOTHREAD_LOCK_GUARD();

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip | vsgein | vstip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    return old;
}

void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void *),
                             void *arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}
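
/*
 * Typical usage (an illustrative sketch, not code from this file): a
 * machine model with a CLINT/ACLINT timer registers its time source here,
 * e.g. riscv_cpu_set_rdtime_fn(env, mtimer_get_time, mtimer), so that
 * guest rdtime reads are serviced by the timer device. mtimer_get_time
 * and mtimer are hypothetical names used only for this example.
 */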

void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
                                   int (*rmw_fn)(void *arg,
                                                 target_ulong reg,
                                                 target_ulong *val,
                                                 target_ulong new_val,
                                                 target_ulong write_mask),
                                   void *rmw_fn_arg)
{
    if (priv <= PRV_M) {
        env->aia_ireg_rmw_fn[priv] = rmw_fn;
        env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
    }
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    if (icount_enabled() && newpriv != env->priv) {
        riscv_itrigger_update_priv(env);
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}

/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and
 * its TLB page. Returns 0 if the permission checking was successful.
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @tlb_size: TLB page size containing addr. It could be modified after PMP
 *            permission checking. NULL if no TLB page is to be set for addr.
 * @addr: The physical address to be checked for permission
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot,
                                    target_ulong *tlb_size, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    int pmp_index = -1;

    if (!riscv_cpu_cfg(env)->pmp) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    pmp_index = pmp_hart_has_privs(env, addr, size, 1 << access_type,
                                   &pmp_priv, mode);
    if (pmp_index < 0) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);
    if ((tlb_size != NULL) && pmp_index != MAX_RISCV_PMPS) {
        target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
        target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;

        *tlb_size = pmp_get_tlb_size(env, pmp_index, tlb_sa, tlb_ea);
    }

    return TRANSLATE_SUCCESS;
}

/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful.
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the fault pte address
 *                  when an error occurs on pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage,
                                bool is_debug)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
    bool use_background = false;
    hwaddr ppn;
    RISCVCPU *cpu = env_archcpu(env);
    int napot_bits = 0;
    target_ulong napot_mask;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!riscv_cpu_virt_enabled(env) && two_stage) {
        use_background = true;
    }

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
        mode = PRV_U;
    }

    if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;

    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }

    if (first_stage == true) {
        if (use_background) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP64_MODE);
            }
        } else {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP64_MODE);
            }
        }
        widened = 0;
    } else {
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP32_MODE);
        } else {
            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP64_MODE);
        }
        widened = 2;
    }

    /* status.SUM will be ignored if execute on background */
    sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug;
    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;

    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
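    /*
     * The walk below re-enters at "restart" if a racing update changed
     * the PTE while we were setting its accessed/dirty bits (see the
     * cmpxchg further down).
     */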
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */
        hwaddr pte_addr;

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 mmu_idx, false, true,
                                                 is_debug);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
                                               sizeof(target_ulong),
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

        target_ulong pte;
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

        if (riscv_cpu_sxl(env) == MXL_RV32) {
            ppn = pte >> PTE_PPN_SHIFT;
        } else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) {
            ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
        } else {
            ppn = pte >> PTE_PPN_SHIFT;
            if ((pte & ~(target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT) {
                return TRANSLATE_FAIL;
            }
        }

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) {
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
                return TRANSLATE_FAIL;
            }
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }
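
            /*
             * Svnapot note: the only NAPOT size the extension currently
             * defines is 64 KiB, which encodes as the low four PPN bits
             * being 0b1000, hence the ctzl(ppn) + 1 == 4 check below.
             */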
            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;

            if (cpu->cfg.ext_svnapot && (pte & PTE_N)) {
                napot_bits = ctzl(ppn) + 1;
                if ((i != (levels - 1)) || (napot_bits != 4)) {
                    return TRANSLATE_FAIL;
                }
            }

            napot_mask = (1 << napot_bits) - 1;
            *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
                          (vpn & (((target_ulong)1 << ptshift) - 1))
                         ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}

static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage,
                                bool two_stage_indirect)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions, vm;
    uint64_t stap_mode;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        stap_mode = SATP32_MODE;
    } else {
        stap_mode = SATP64_MODE;
    }

    if (first_stage) {
        vm = get_field(env->satp, stap_mode);
    } else {
        vm = get_field(env->hgatp, stap_mode);
    }

    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;

    switch (access_type) {
    case MMU_INST_FETCH:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT :
                RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    env->two_stage_lookup = two_stage;
    env->two_stage_indirect_lookup = two_stage_indirect;
}
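
/*
 * Debug/monitor accesses translate with is_debug = true, so the walk
 * ignores mstatus.SUM restrictions; a failed walk returns -1 rather
 * than raising a guest fault.
 */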
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, riscv_cpu_virt_enabled(env), true)) {
        return -1;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else if (access_type == MMU_DATA_LOAD) {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    env->two_stage_indirect_lookup = false;
    cpu_loop_exit_restore(cs, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    env->two_stage_indirect_lookup = false;
    cpu_loop_exit_restore(cs, retaddr);
}

static void pmu_tlb_fill_incr_ctr(RISCVCPU *cpu, MMUAccessType access_type)
{
    enum riscv_pmu_event_idx pmu_event_type;

    switch (access_type) {
    case MMU_INST_FETCH:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_ITLB_PREFETCH_MISS;
        break;
    case MMU_DATA_LOAD:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_READ_MISS;
        break;
    case MMU_DATA_STORE:
        pmu_event_type = RISCV_PMU_EVENT_CACHE_DTLB_WRITE_MISS;
        break;
    default:
        return;
    }

    riscv_pmu_incr_ctr(cpu, pmu_event_type);
}

bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = false;
    bool two_stage_indirect_error = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
               get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
            two_stage_lookup = true;
        }
    }

    pmu_tlb_fill_incr_ctr(cpu, access_type);
    if (riscv_cpu_virt_enabled(env) ||
        ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
         access_type != MMU_INST_FETCH)) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false);

        /*
         * A G-stage exception may be triggered during two-stage lookup.
         * env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            two_stage_indirect_error = true;
            access_type = MMU_DATA_LOAD;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      HWADDR_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, mmu_idx, false, true,
                                       false);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
                          HWADDR_FMT_plx " prot %d\n",
                          __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                               size, access_type, mode);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }
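
            /*
             * If the G-stage walk failed, the fault is reported against
             * the guest-physical address. Note that guest_phys_fault_addr
             * is stored pre-shifted right by 2, matching the htval/mtval2
             * encoding defined by the hypervisor extension.
             */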
            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      HWADDR_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                           size, access_type, mode);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" HWADDR_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error,
                            riscv_cpu_virt_enabled(env) ||
                                riscv_cpu_two_stage_lookup(mmu_idx),
                            two_stage_indirect_error);
        cpu_loop_exit_restore(cs, retaddr);
    }

    return true;
}

static target_ulong riscv_transformed_insn(CPURISCVState *env,
                                           target_ulong insn,
                                           target_ulong taddr)
{
    target_ulong xinsn = 0;
    target_ulong access_rs1 = 0, access_imm = 0, access_size = 0;

    /*
     * Only Quadrant 0 and Quadrant 2 of the RVC instruction space need to
     * be uncompressed. Quadrant 1 of the RVC instruction space need not be
     * transformed because those instructions won't generate any load/store
     * trap.
     */

    if ((insn & 0x3) != 0x3) {
        /* Transform 16bit instruction into 32bit instruction */
        switch (GET_C_OP(insn)) {
        case OPC_RISC_C_OP_QUAD0: /* Quadrant 0 */
            switch (GET_C_FUNC(insn)) {
            case OPC_RISC_C_FUNC_FLD_LQ:
                if (riscv_cpu_xlen(env) != 128) { /* C.FLD (RV32/64) */
                    xinsn = OPC_RISC_FLD;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_LW: /* C.LW */
                xinsn = OPC_RISC_LW;
                xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                access_rs1 = GET_C_RS1S(insn);
                access_imm = GET_C_LW_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FLW_LD:
                if (riscv_cpu_xlen(env) == 32) { /* C.FLW (RV32) */
                    xinsn = OPC_RISC_FLW;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LW_IMM(insn);
                    access_size = 4;
                } else { /* C.LD (RV64/RV128) */
                    xinsn = OPC_RISC_LD;
                    xinsn = SET_RD(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_LD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_FSD_SQ:
                if (riscv_cpu_xlen(env) != 128) { /* C.FSD (RV32/64) */
                    xinsn = OPC_RISC_FSD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SD_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_SW: /* C.SW */
                xinsn = OPC_RISC_SW;
                xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                access_rs1 = GET_C_RS1S(insn);
                access_imm = GET_C_SW_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FSW_SD:
                if (riscv_cpu_xlen(env) == 32) { /* C.FSW (RV32) */
                    xinsn = OPC_RISC_FSW;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SW_IMM(insn);
                    access_size = 4;
                } else { /* C.SD (RV64/RV128) */
                    xinsn = OPC_RISC_SD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2S(insn));
                    access_rs1 = GET_C_RS1S(insn);
                    access_imm = GET_C_SD_IMM(insn);
                    access_size = 8;
                }
                break;
            default:
                break;
            }
            break;
        case OPC_RISC_C_OP_QUAD2: /* Quadrant 2 */
            switch (GET_C_FUNC(insn)) {
            case OPC_RISC_C_FUNC_FLDSP_LQSP:
                if (riscv_cpu_xlen(env) != 128) { /* C.FLDSP (RV32/64) */
                    xinsn = OPC_RISC_FLD;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_LWSP: /* C.LWSP */
                xinsn = OPC_RISC_LW;
                xinsn = SET_RD(xinsn, GET_C_RD(insn));
                access_rs1 = 2;
                access_imm = GET_C_LWSP_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FLWSP_LDSP:
                if (riscv_cpu_xlen(env) == 32) { /* C.FLWSP (RV32) */
                    xinsn = OPC_RISC_FLW;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LWSP_IMM(insn);
                    access_size = 4;
                } else { /* C.LDSP (RV64/RV128) */
                    xinsn = OPC_RISC_LD;
                    xinsn = SET_RD(xinsn, GET_C_RD(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_LDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_FSDSP_SQSP:
                if (riscv_cpu_xlen(env) != 128) { /* C.FSDSP (RV32/64) */
                    xinsn = OPC_RISC_FSD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            case OPC_RISC_C_FUNC_SWSP: /* C.SWSP */
                xinsn = OPC_RISC_SW;
                xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                access_rs1 = 2;
                access_imm = GET_C_SWSP_IMM(insn);
                access_size = 4;
                break;
            case OPC_RISC_C_FUNC_FSWSP_SDSP:
                if (riscv_cpu_xlen(env) == 32) { /* C.FSWSP (RV32) */
                    xinsn = OPC_RISC_FSW;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SWSP_IMM(insn);
                    access_size = 4;
                } else { /* C.SDSP (RV64/RV128) */
                    xinsn = OPC_RISC_SD;
                    xinsn = SET_RS2(xinsn, GET_C_RS2(insn));
                    access_rs1 = 2;
                    access_imm = GET_C_SDSP_IMM(insn);
                    access_size = 8;
                }
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /*
         * Clear Bit1 of transformed instruction to indicate that the
         * original instruction was a 16bit instruction
         */
        xinsn &= ~((target_ulong)0x2);
    } else {
        /* Transform 32bit (or wider) instructions */
        switch (MASK_OP_MAJOR(insn)) {
        case OPC_RISC_ATOMIC:
            xinsn = insn;
            access_rs1 = GET_RS1(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_LOAD:
        case OPC_RISC_FP_LOAD:
            xinsn = SET_I_IMM(insn, 0);
            access_rs1 = GET_RS1(insn);
            access_imm = GET_IMM(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_STORE:
        case OPC_RISC_FP_STORE:
            xinsn = SET_S_IMM(insn, 0);
            access_rs1 = GET_RS1(insn);
            access_imm = GET_STORE_IMM(insn);
            access_size = 1 << GET_FUNCT3(insn);
            break;
        case OPC_RISC_SYSTEM:
            if (MASK_OP_SYSTEM(insn) == OPC_RISC_HLVHSV) {
                xinsn = insn;
                access_rs1 = GET_RS1(insn);
                access_size = ((GET_FUNCT7(insn) >> 1) & 0x3);
                access_size = 1 << access_size;
            }
            break;
        default:
            break;
        }

        if (access_size) {
            xinsn = SET_RS1(xinsn, (taddr - (env->gpr[access_rs1] +
                                             access_imm)) & (access_size - 1));
        }
    }

    return xinsn;
}
#endif /* !CONFIG_USER_ONLY */

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool write_gva = false;
    uint64_t s;

    /* cs->exception_index is 32-bits wide unlike mcause which is XLEN-bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    uint64_t deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;
    target_ulong tinst = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

    if (cause == RISCV_EXCP_SEMIHOST) {
        do_common_semihosting(cs);
        env->pc += 4;
        return;
    }

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /*
                 * special pseudoinstruction for G-stage fault taken while
                 * doing VS-stage page table walk.
                 */
                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
            } else {
                /*
                 * The "Addr. Offset" field in transformed instruction is
                 * non-zero only for misaligned access.
                 */
                tinst = riscv_transformed_insn(env, env->bins, tval);
            }
            break;
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
            write_gva = env->two_stage_lookup;
            tval = env->badaddr;
            if (env->two_stage_indirect_lookup) {
                /*
                 * special pseudoinstruction for G-stage fault taken while
                 * doing VS-stage page table walk.
                 */
                tinst = (riscv_cpu_xlen(env) == 32) ? 0x00002000 : 0x00003000;
            }
            break;
        case RISCV_EXCP_ILLEGAL_INST:
        case RISCV_EXCP_VIRT_INSTRUCTION_FAULT:
            tval = env->bins;
            break;
        case RISCV_EXCP_BREAKPOINT:
            if (cs->watchpoint_hit) {
                tval = cs->watchpoint_hit->hitaddr;
                cs->watchpoint_hit = NULL;
            }
            break;
        default:
            break;
        }

        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));

    if (env->priv <= PRV_S &&
        cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            uint64_t hdeleg = async ? env->hideleg : env->hedeleg;

            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust cause. Yes if it is a VS-mode
                 * interrupt, no if the hypervisor has delegated one of the
                 * HS-mode interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                write_gva = false;
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
            }
            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
        env->htinst = tinst;
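        /*
         * stvec/mtvec keep the trap-vector mode in their two low bits:
         * mode 1 (vectored) sends asynchronous traps to base + 4 * cause,
         * everything else lands on the base address.
         */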
        env->pc = (env->stvec >> 2 << 2) +
                  ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            if (riscv_cpu_virt_enabled(env) && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mtval = tval;
        env->mtval2 = mtval2;
        env->mtinst = tinst;
        env->pc = (env->mtvec >> 2 << 2) +
                  ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
    env->two_stage_indirect_lookup = false;
#endif
    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
}