/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "trace.h"

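/*
 * For system emulation the MMU index is simply the current privilege level
 * (PRV_U, PRV_S or PRV_M), so a privilege change selects a different TLB
 * rather than requiring a flush (see the note in riscv_cpu_set_mode()).
 */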
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}

#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
    target_ulong pending = env->mip & env->mie;
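    /*
     * mie/sie below are 0/1 truth values: interrupts for a level are
     * globally enabled when running below that level, or at that level with
     * the corresponding xIE bit set.  Negation turns the truth value into
     * an all-zeroes/all-ones mask (e.g. -(target_ulong)1 == ~0), so each
     * term contributes its interrupt bits only when that level is enabled.
     */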
    target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie);
    target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie);
    target_ulong irqs = (pending & ~env->mideleg & -mie) |
                        (pending & env->mideleg & -sie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return EXCP_NONE; /* indicates no pending interrupt */
    }
}
#endif

bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
#endif
    return false;
}

#if !defined(CONFIG_USER_ONLY)

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        return true;
    }

    return false;
}

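/*
 * A sketch of the intended usage (assumed, mirroring how QEMU interrupt
 * controller models own their mip bits): a controller claims the bits it
 * drives once, at realize time, so no two devices own the same bit:
 *
 *     if (riscv_cpu_claim_interrupts(cpu, MIP_MTIP | MIP_MSIP) < 0) {
 *         error_report("MIP bits already claimed");
 *         exit(1);
 *     }
 */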
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}

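/*
 * A sketch of the intended usage (assumed): a device model raises or lowers
 * one mip bit by passing that bit in both mask and value, e.g.
 *
 *     riscv_cpu_update_mip(cpu, MIP_MTIP, level ? MIP_MTIP : 0);
 *
 * The iothread lock is taken if not already held, because cpu_interrupt()
 * and cpu_reset_interrupt() must be called with it held.
 */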
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t old = env->mip;
    bool locked = false;

    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return old;
}

void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}

/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 (TRANSLATE_SUCCESS) if the translation was
 * successful.
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                int access_type, int mmu_idx)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx;

    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum;
    int mxr = get_field(env->mstatus, MSTATUS_MXR);

    if (env->priv_ver >= PRIV_VERSION_1_10_0) {
        base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT;
        sum = get_field(env->mstatus, MSTATUS_SUM);
        vm = get_field(env->satp, SATP_MODE);
        switch (vm) {
        case VM_1_10_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_10_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV57:
            levels = 5; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    } else {
        base = (hwaddr)(env->sptbr) << PGSHIFT;
        sum = !get_field(env->mstatus, MSTATUS_PUM);
        vm = get_field(env->mstatus, MSTATUS_VM);
        switch (vm) {
        case VM_1_09_SV32:
            levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_09_SV39:
            levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_SV48:
            levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
            g_assert_not_reached();
        }
    }

    CPUState *cs = env_cpu(env);
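    /*
     * Virtual addresses must be sign-extended from the most-significant
     * implemented bit.  E.g. for SV39, va_bits = 12 + 3 * 9 = 39, so bits
     * 63..38 of the address must all equal bit 38; masked_msbs extracts
     * them, and anything other than all-zeroes or all-ones is rejected.
     */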
    int va_bits = PGSHIFT + levels * ptidxbits;
    target_ulong mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    target_ulong masked_msbs = (addr >> (va_bits - 1)) & mask;
    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);

        /* check that physical address of PTE is legal */
        hwaddr pte_addr = base + idx * ptesize;

        if (riscv_feature(env, RISCV_FEATURE_PMP) &&
            !pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong),
            1 << MMU_DATA_LOAD, PRV_S)) {
            return TRANSLATE_PMP_FAIL;
        }

#if defined(TARGET_RISCV32)
        target_ulong pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
#elif defined(TARGET_RISCV64)
        target_ulong pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
#endif
        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

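        /*
         * PTE layout (privileged spec): bits 9..0 hold the flag bits
         * (V, R, W, X, U, G, A, D and two software RSW bits), hence
         * PTE_PPN_SHIFT == 10; everything above them is the physical
         * page number.
         */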
        hwaddr ppn = pte >> PTE_PPN_SHIFT;

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        atomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;
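            /*
             * Worked example: an SV39 2 MiB megapage is a leaf found at
             * i == 1, where ptshift == 9, so the low 9 bits of the output
             * PPN come from the virtual address, yielding an ordinary
             * 4 KiB TLB entry inside the megapage.
             */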

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}

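/*
 * Page-fault exception codes only exist in privileged spec 1.10 and later;
 * on older versions, and whenever the failure came from PMP rather than the
 * page tables, the corresponding access-fault exception is raised instead.
 */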
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions =
        (env->priv_ver >= PRIV_VERSION_1_10_0) &&
        get_field(env->satp, SATP_MODE) != VM_1_10_MBARE &&
        !pmp_violation;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = page_fault_exceptions ?
            RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
}

hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, mmu_idx)) {
        return -1;
    }
    return phys_addr;
}

void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    }

    env->badaddr = addr;
    riscv_raise_exception(env, cs->exception_index, retaddr);
}

void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    riscv_raise_exception(env, cs->exception_index, retaddr);
}
#endif

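/*
 * A note on the contract assumed here (from the generic TCG tlb_fill hook):
 * on success the translation is installed with tlb_set_page() and true is
 * returned; when "probe" is set the caller only wants to know whether the
 * access would fault, so false is returned instead of raising an exception.
 */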
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
#ifndef CONFIG_USER_ONLY
    hwaddr pa = 0;
    int prot;
    bool pmp_violation = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx);

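    /*
     * Recompute the effective privilege for the PMP check below:
     * get_physical_address() applies MSTATUS.MPRV internally, but the PMP
     * lookup here must also honour it for data accesses.
     */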
    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    qemu_log_mask(CPU_LOG_MMU,
                  "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
                  " prot %d\n", __func__, address, ret, pa, prot);

    if (riscv_feature(env, RISCV_FEATURE_PMP) &&
        (ret == TRANSLATE_SUCCESS) &&
        !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
        ret = TRANSLATE_PMP_FAIL;
    }
    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }
    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation);
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }
#else
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    cpu_loop_exit_restore(cs, retaddr);
#endif
}

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    /* cs->exception_index is 32 bits wide, unlike mcause which is XLEN bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;

    static const int ecall_cause_map[] = {
        [PRV_U] = RISCV_EXCP_U_ECALL,
        [PRV_S] = RISCV_EXCP_S_ECALL,
        [PRV_H] = RISCV_EXCP_VS_ECALL,
        [PRV_M] = RISCV_EXCP_M_ECALL
    };

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            tval = env->badaddr;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);
            cause = ecall_cause_map[env->priv];
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, cause < 23 ?
        (async ? riscv_intr_names : riscv_excp_names)[cause] : "(unknown)");

    if (env->priv <= PRV_S &&
        cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->sbadaddr = tval;
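        /*
         * The low two bits of xtvec select the trap-vector mode: 0 is
         * direct, 1 is vectored.  In vectored mode an asynchronous trap
         * enters at base + 4 * cause; synchronous traps always enter at
         * the base address.
         */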
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
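        /*
         * Set the interrupt bit (the MSB of mcause) for asynchronous traps:
         * when async == 1, ~(((target_ulong)-1) >> 1) is exactly the top
         * bit; when async == 0 the added mask is zero.
         */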
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mbadaddr = tval;
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

#endif
    cs->exception_index = EXCP_NONE; /* mark handled to qemu */
}