/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"

int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    uint32_t flags = 0;

    *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
    *cs_base = 0;

    if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
        /*
         * If env->vl equals VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we only set the vl_eq_vlmax
         * flag to true when maxsz >= 8 bytes.
         */
        uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t maxsz = vlmax << sew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }
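
    /*
     * Worked example (illustrative numbers): with VLEN = 128, SEW = 32
     * (encoded vsew = 2) and LMUL = 1, VLMAX = 128 / 32 = 4 elements and
     * maxsz = 4 << 2 = 16 bytes, so vl_eq_vlmax holds whenever
     * vstart == 0 and vl == 4, enabling the GVEC fast path.
     */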

#ifdef CONFIG_USER_ONLY
    flags |= TB_FLAGS_MSTATUS_FS;
    flags |= TB_FLAGS_MSTATUS_VS;
#else
    flags |= cpu_mmu_index(env, 0);
    if (riscv_cpu_fp_enabled(env)) {
        flags |= env->mstatus & MSTATUS_FS;
    }

    if (riscv_cpu_vector_enabled(env)) {
        flags |= env->mstatus & MSTATUS_VS;
    }

    if (riscv_has_ext(env, RVH)) {
        if (env->priv == PRV_M ||
            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
                get_field(env->hstatus, HSTATUS_HU))) {
            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
        }

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
                           get_field(env->mstatus_hs, MSTATUS_FS));

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
                           get_field(env->mstatus_hs, MSTATUS_VS));
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
    if (env->cur_pmmask < (env->xl == MXL_RV32 ? UINT32_MAX : UINT64_MAX)) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
    }
    if (env->cur_pmbase != 0) {
        flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
    }

    *pflags = flags;
}

void riscv_cpu_update_mask(CPURISCVState *env)
{
    target_ulong mask = -1, base = 0;
    /*
     * TODO: Current RVJ spec does not specify
     * how the extension interacts with XLEN.
     */
#ifndef CONFIG_USER_ONLY
    if (riscv_has_ext(env, RVJ)) {
        switch (env->priv) {
        case PRV_M:
            if (env->mmte & M_PM_ENABLE) {
                mask = env->mpmmask;
                base = env->mpmbase;
            }
            break;
        case PRV_S:
            if (env->mmte & S_PM_ENABLE) {
                mask = env->spmmask;
                base = env->spmbase;
            }
            break;
        case PRV_U:
            if (env->mmte & U_PM_ENABLE) {
                mask = env->upmmask;
                base = env->upmbase;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
#endif
    if (env->xl == MXL_RV32) {
        env->cur_pmmask = mask & UINT32_MAX;
        env->cur_pmbase = base & UINT32_MAX;
    } else {
        env->cur_pmmask = mask;
        env->cur_pmbase = base;
    }
}
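
/*
 * Illustrative note: the translator applies the cached values to data
 * addresses as addr = (addr & cur_pmmask) | cur_pmbase.  With hypothetical
 * values mpmmask = 0x00ffffffffffffff and mpmbase = 0, the top byte of
 * every 64-bit pointer is ignored, the usual pointer-tagging arrangement
 * for the J extension.
 */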

#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong virt_enabled = riscv_cpu_virt_enabled(env);

    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);

    target_ulong vsgemask =
                (target_ulong)1 << get_field(env->hstatus, HSTATUS_VGEIN);
    target_ulong vsgein = (env->hgeip & vsgemask) ? MIP_VSEIP : 0;

    target_ulong pending = (env->mip | vsgein) & env->mie;

    target_ulong mie  = env->priv < PRV_M ||
                        (env->priv == PRV_M && mstatus_mie);
    target_ulong sie  = env->priv < PRV_S ||
                        (env->priv == PRV_S && mstatus_sie);
    target_ulong hsie = virt_enabled || sie;
    target_ulong vsie = virt_enabled && sie;

    target_ulong irqs =
            (pending & ~env->mideleg & -mie) |
            (pending &  env->mideleg & ~env->hideleg & -hsie) |
            (pending &  env->mideleg &  env->hideleg & -vsie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return RISCV_EXCP_NONE; /* indicates no pending interrupt */
    }
}
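
/*
 * Worked example (illustrative): a hart in S-mode with mstatus.SIE = 1,
 * MIP_MTIP and MIP_STIP both pending and enabled in mie, and mideleg
 * delegating STIP but not MTIP.  The mie flag is true (priv < PRV_M), so
 * MTIP enters irqs via the first term; sie and hsie are true, so STIP
 * enters via the second term; ctz64() then returns the lowest-numbered
 * pending cause, here the supervisor timer interrupt.
 */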

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}

/* Return true if vector support is currently enabled */
bool riscv_cpu_vector_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_VS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) {
            return false;
        }
        return true;
    }

    return false;
}

void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL | MSTATUS_VS;
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}

target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return 0;
    }

    return env->geilen;
}

void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    if (geilen > (TARGET_LONG_BITS - 1)) {
        return;
    }

    env->geilen = geilen;
}

bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, VIRT_ONOFF);
}

void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    /* Flush the TLB on all virt mode changes. */
    if (get_field(env->virt, VIRT_ONOFF) != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt = set_field(env->virt, VIRT_ONOFF, enable);

    if (enable) {
        /*
         * The guest external interrupts from an interrupt controller are
         * delivered only when the Guest/VM is running (i.e. V=1). This means
         * any guest external interrupt which is triggered while the Guest/VM
         * is not running (i.e. V=0) will be missed in QEMU, resulting in a
         * guest with sluggish response to serial console input and other
         * I/O events.
         *
         * To solve this, we check for and inject pending interrupts after
         * setting V=1.
         */
        riscv_cpu_update_mip(env_archcpu(env), 0, 0);
    }
}

bool riscv_cpu_two_stage_lookup(int mmu_idx)
{
    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}

int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t gein, vsgein = 0, old = env->mip;
    bool locked = false;

    if (riscv_cpu_virt_enabled(env)) {
        gein = get_field(env->hstatus, HSTATUS_VGEIN);
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
    }

    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip | vsgein) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return old;
}
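
/*
 * Usage sketch (illustrative, no such call site in this file): a timer
 * model raising the machine timer interrupt would do something like
 *
 *     riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(level));
 *
 * i.e. mask selects which MIP bits to change, value supplies their new
 * state, and the previous MIP value is returned.
 */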

void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;
    env->xl = cpu_recompute_xl(env);
    riscv_cpu_update_mask(env);

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}

/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and its
 * TLB page. Returns 0 if the permission checking was successful
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @tlb_size: TLB page size containing addr. It could be modified after PMP
 *            permission checking. NULL if we don't set a TLB page for addr.
 * @addr: The physical address to be checked permission
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot,
                                    target_ulong *tlb_size, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    target_ulong tlb_size_pmp = 0;

    if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
                            mode)) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);
    if (tlb_size != NULL) {
        if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
            *tlb_size = tlb_size_pmp;
        }
    }

    return TRANSLATE_SUCCESS;
}
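
/*
 * Note on tlb_size: when the matching PMP region covers only part of the
 * page containing addr, the size of the TLB entry to be installed is
 * shrunk accordingly, so that accesses elsewhere in the page still miss
 * the TLB and get re-checked against the PMP rules.
 */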

/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @fault_pte_addr: If not NULL, this will be set to fault pte address
 *                  when an error occurs on pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage,
                                bool is_debug)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
    bool use_background = false;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!riscv_cpu_virt_enabled(env) && two_stage) {
        use_background = true;
    }

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
        mode = PRV_U;
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;

    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }

    if (first_stage == true) {
        if (use_background) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP64_MODE);
            }
        } else {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP64_MODE);
            }
        }
        widened = 0;
    } else {
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP32_MODE);
        } else {
            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP64_MODE);
        }
        widened = 2;
    }
    /* status.SUM is ignored when executing on the background registers */
    sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug;
    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;

    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }
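
    /*
     * Illustrative example: for Sv39 (levels = 3, ptidxbits = 9,
     * widened = 0), va_bits = 12 + 3 * 9 = 39, so on RV64 the check above
     * requires bits 63..38 of the virtual address to be all zeros or all
     * ones; anything else is non-canonical and fails translation.
     */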

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */
        hwaddr pte_addr;

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 mmu_idx, false, true,
                                                 is_debug);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
                                               sizeof(target_ulong),
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

        target_ulong pte;
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

        hwaddr ppn = pte >> PTE_PPN_SHIFT;

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) |
                        (addr & ~TARGET_PAGE_MASK);
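
            /*
             * Illustrative example: an Sv39 2 MiB megapage is a leaf at
             * ptshift = 9, so the low 9 PPN bits come from the VPN above;
             * each 4 KiB page inside the superpage then gets its own
             * correctly translated TLB entry, without the TLB having to
             * know about superpages.
             */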

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}

static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions, vm;
    uint64_t satp_mode;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        satp_mode = SATP32_MODE;
    } else {
        satp_mode = SATP64_MODE;
    }

    if (first_stage) {
        vm = get_field(env->satp, satp_mode);
    } else {
        vm = get_field(env->hgatp, satp_mode);
    }

    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;

    switch (access_type) {
    case MMU_INST_FETCH:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    env->two_stage_lookup = two_stage;
}

824
825hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
826{
827 RISCVCPU *cpu = RISCV_CPU(cs);
36a18664 828 CPURISCVState *env = &cpu->env;
0c3e702a
MC
829 hwaddr phys_addr;
830 int prot;
831 int mmu_idx = cpu_mmu_index(&cpu->env, false);
832
33a9a57d 833 if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
11c27c6d 834 true, riscv_cpu_virt_enabled(env), true)) {
0c3e702a
MC
835 return -1;
836 }
36a18664
AF
837
838 if (riscv_cpu_virt_enabled(env)) {
33a9a57d 839 if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
11c27c6d 840 0, mmu_idx, false, true, true)) {
36a18664
AF
841 return -1;
842 }
843 }
844
9ef82119 845 return phys_addr & TARGET_PAGE_MASK;
0c3e702a
MC
846}
847
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else if (access_type == MMU_DATA_LOAD) {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(env, cs->exception_index, retaddr);
}

bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
               get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
            two_stage_lookup = true;
        }
    }

    if (riscv_cpu_virt_enabled(env) ||
        ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
         access_type != MMU_INST_FETCH)) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false);

        /*
         * A G-stage exception may be triggered during two stage lookup;
         * env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            access_type = MMU_DATA_LOAD;
        }
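
        /*
         * Note: the hypervisor extension defines htval/mtval2 to hold the
         * faulting guest physical address shifted right by 2, which is why
         * guest_phys_fault_addr stores GPA >> 2 both here (already set by
         * get_physical_address()) and in the G-stage failure path below.
         */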

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, mmu_idx, false, true,
                                       false);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
                          TARGET_FMT_plx " prot %d\n",
                          __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                               size, access_type, mode);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }

            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                           size, access_type, mode);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error,
                            riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx));
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }

    return true;
}
#endif /* !CONFIG_USER_ONLY */

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 *
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool write_gva = false;
    uint64_t s;

    /* cs->exception_index is 32 bits wide, unlike mcause which is XLEN
     * bits wide, so we mask off the MSB and separate into trap type and
     * cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

    if (cause == RISCV_EXCP_SEMIHOST) {
        if (env->priv >= PRV_S) {
            env->gpr[xA0] = do_common_semihosting(cs);
            env->pc += 4;
            return;
        }
        cause = RISCV_EXCP_BREAKPOINT;
    }

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_gva = true;
            tval = env->badaddr;
            break;
        case RISCV_EXCP_ILLEGAL_INST:
            tval = env->bins;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));

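    /*
     * Delegation example (illustrative): an S-timer interrupt arrives with
     * async set and cause = IRQ_S_TIMER.  If the hart runs at S- or U-mode
     * and mideleg has that bit set, the trap is handled in S-mode below
     * (or in VS-mode when additionally delegated through hideleg);
     * otherwise it falls through to the M-mode path.
     */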
    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            target_ulong hdeleg = async ? env->hideleg : env->hedeleg;

            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust cause. Yes if it is a VS-mode
                 * interrupt, no if the hypervisor has delegated one of the
                 * HS-mode interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
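                /*
                 * e.g. IRQ_VS_TIMER (cause 6) is presented to the guest
                 * as IRQ_S_TIMER (cause 5), so VS-mode software sees the
                 * standard S-mode cause encodings.
                 */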
                write_gva = false;
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
                write_gva = false;
            }
            env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
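        /*
         * Illustrative example: with stvec in vectored mode (low two bits
         * == 1) and base 0x80000000, an asynchronous trap with cause 5
         * (supervisor timer) enters at 0x80000000 + 5 * 4 = 0x80000014;
         * synchronous traps always enter at the base address.
         */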
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            if (riscv_cpu_virt_enabled(env) && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mtval = tval;
        env->mtval2 = mtval2;
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
#endif
    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
}