/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/exec-all.h"

static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
    uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);

/*
 * Accessor method to extract address matching type 'a field' from cfg reg
 */
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
    uint8_t a = cfg >> 3;
    return a & 0x3;
}
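
/*
 * For reference, the pmpcfg byte layout defined by the privileged spec is:
 * bit 0 = R, bit 1 = W, bit 2 = X, bits 3-4 = A, bit 7 = L.  The A field
 * selects 0 = OFF, 1 = TOR, 2 = NA4, 3 = NAPOT, so a cfg value of 0x18
 * makes pmp_get_a_field() return PMP_AMATCH_NAPOT.
 */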

/*
 * Check whether a PMP is locked or not.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    /* Top PMP has no 'next' to check */
    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
        return 0;
    }

    return 0;
}

/*
 * Count the number of active rules.
 */
uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}

/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}

/*
 * Accessor to set the cfg reg for a specific PMP/HART
 * Bounds-checks the index and honours the relevant lock bit.
 */
static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        bool locked = true;

        if (riscv_cpu_cfg(env)->epmp) {
            /* mseccfg.RLB is set */
            if (MSECCFG_RLB_ISSET(env)) {
                locked = false;
            }

            /* mseccfg.MML is not set */
            if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
                locked = false;
            }

            /* mseccfg.MML is set */
            if (MSECCFG_MML_ISSET(env)) {
                /* not adding execute bit */
                if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
                    locked = false;
                }
                /* shared region and not adding X bit */
                if ((val & PMP_LOCK) != PMP_LOCK &&
                    (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
                    locked = false;
                }
            }
        } else {
            if (!pmp_is_locked(env, pmp_index)) {
                locked = false;
            }
        }

        if (locked) {
            qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
        } else {
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule(env, pmp_index);
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - out of bounds\n");
    }
}

static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea)
{
    /*
     * aaaa...aaa0   8-byte NAPOT range
     * aaaa...aa01   16-byte NAPOT range
     * aaaa...a011   32-byte NAPOT range
     * ...
     * aa01...1111   2^XLEN-byte NAPOT range
     * a011...1111   2^(XLEN+1)-byte NAPOT range
     * 0111...1111   2^(XLEN+2)-byte NAPOT range
     * 1111...1111   Reserved
     */
    a = (a << 2) | 0x3;
    *sa = a & (a + 1);
    *ea = a | (a + 1);
}
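
/*
 * Worked example for pmp_decode_napot(): pmpaddr = 0x9 (binary ...1001)
 * gives a = 0x27 after (a << 2) | 0x3, so sa = 0x27 & 0x28 = 0x20 and
 * ea = 0x27 | 0x28 = 0x2f, i.e. a 16-byte NAPOT region covering
 * [0x20, 0x2f].
 */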

void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
{
    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    target_ulong sa = 0u;
    target_ulong ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        if (sa > ea) {
            sa = ea = 0u;
        }
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u;
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;
}
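
/*
 * TOR example for pmp_update_rule_addr(): with pmpaddr0 = 0x100,
 * pmpaddr1 = 0x200 and the A field of pmpcfg entry 1 set to TOR,
 * entry 1 spans sa = 0x100 << 2 = 0x400 up to ea = (0x200 << 2) - 1 = 0x7ff.
 */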

void pmp_update_rule_nums(CPURISCVState *env)
{
    int i;

    env->pmp_state.num_rules = 0;
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}

/*
 * Convert cfg/addr reg values here into simple 'sa' --> start address and
 * 'ea' --> end address values.
 * This function is called relatively infrequently, whereas the check that
 * an address is within a pmp rule is called often, so optimise that one.
 */
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index)
{
    pmp_update_rule_addr(env, pmp_index);
    pmp_update_rule_nums(env);
}

static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
{
    int result = 0;

    if ((addr >= env->pmp_state.addr[pmp_index].sa) &&
        (addr <= env->pmp_state.addr[pmp_index].ea)) {
        result = 1;
    } else {
        result = 0;
    }

    return result;
}

/*
 * Check if the address has required RWX privs when no PMP entry is matched.
 */
static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
    target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
    target_ulong mode)
{
    bool ret;

    if (riscv_cpu_cfg(env)->epmp) {
        if (MSECCFG_MMWP_ISSET(env)) {
            /*
             * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
             * so we default to deny all, even for M-mode.
             */
            *allowed_privs = 0;
            return false;
        } else if (MSECCFG_MML_ISSET(env)) {
            /*
             * The Machine Mode Lockdown (mseccfg.MML) bit is set
             * so we can only execute code in M-mode with an applicable
             * rule. Other modes are disabled.
             */
            if (mode == PRV_M && !(privs & PMP_EXEC)) {
                ret = true;
                *allowed_privs = PMP_READ | PMP_WRITE;
            } else {
                ret = false;
                *allowed_privs = 0;
            }

            return ret;
        }
    }

    if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
        /*
         * Privileged spec v1.10 states if HW doesn't implement any PMP entry
         * or no PMP entry matches an M-Mode access, the access succeeds.
         */
        ret = true;
        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
    } else {
        /*
         * Other modes are not allowed to succeed if they don't match a rule,
         * but there are rules. We've checked for no rule earlier in this
         * function.
         */
        ret = false;
        *allowed_privs = 0;
    }

    return ret;
}
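
/*
 * Summary of the default policy implemented above when no PMP entry matches:
 *   - ePMP with mseccfg.MMWP set: deny everything, including M-mode.
 *   - ePMP with mseccfg.MML set:  M-mode loads/stores are allowed (R/W);
 *     execution and all lower-privilege accesses are denied.
 *   - otherwise: M-mode (or a CPU without PMP) gets R/W/X and lower modes
 *     are denied.
 */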

/*
 * Public Interface
 */

/*
 * Check if the address has required RWX privs to complete desired operation
 * Return PMP rule index if a pmp rule match
 * Return MAX_RISCV_PMPS if default match
 * Return negative value if no match
 */
int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
    target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
    target_ulong mode)
{
    int i = 0;
    int ret = -1;
    int pmp_size = 0;
    target_ulong s = 0;
    target_ulong e = 0;

    /* Short cut if no rules */
    if (0 == pmp_get_num_rules(env)) {
        if (pmp_hart_has_privs_default(env, addr, size, privs,
                                       allowed_privs, mode)) {
            ret = MAX_RISCV_PMPS;
        }
    }

    if (size == 0) {
        if (riscv_cpu_cfg(env)->mmu) {
            /*
             * If size is unknown (0), assume that all bytes
             * from addr to the end of the page will be accessed.
             */
            pmp_size = -(addr | TARGET_PAGE_MASK);
        } else {
            pmp_size = sizeof(target_ulong);
        }
    } else {
        pmp_size = size;
    }

    /*
     * 1.10 draft priv spec states there is an implicit order
     * from low to high
     */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + pmp_size - 1);

        /* partially inside */
        if ((s + e) == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "pmp violation - access is partially inside\n");
            ret = -1;
            break;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);

        /*
         * Convert the PMP permissions to match the truth table in the
         * ePMP spec.
         */
        const uint8_t epmp_operation =
            ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
            (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);

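        /*
         * The packing above places L in bit 3, R in bit 2, W in bit 1 and
         * X in bit 0, so epmp_operation is the 4-bit L/R/W/X index used by
         * the ePMP truth-table switches below.
         */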
        if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
            /*
             * If the PMP entry is not off and the address is in range,
             * do the priv check
             */
            if (!MSECCFG_MML_ISSET(env)) {
                /*
                 * If mseccfg.MML Bit is not set, do pmp priv check
                 * This will always apply to regular PMP.
                 */
                *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                    *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
                }
            } else {
                /*
                 * If mseccfg.MML Bit set, do the enhanced pmp priv check
                 */
                if (mode == PRV_M) {
                    switch (epmp_operation) {
                    case 0:
                    case 1:
                    case 4:
                    case 5:
                    case 6:
                    case 7:
                    case 8:
                        *allowed_privs = 0;
                        break;
                    case 2:
                    case 3:
                    case 14:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 9:
                    case 10:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 11:
                    case 13:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 12:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                } else {
                    switch (epmp_operation) {
                    case 0:
                    case 8:
                    case 9:
                    case 12:
                    case 13:
                    case 14:
                        *allowed_privs = 0;
                        break;
                    case 1:
                    case 10:
                    case 11:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 2:
                    case 4:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    case 3:
                    case 6:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 5:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 7:
                        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
            }

            /*
             * If matching address range was found, the protection bits
             * defined with PMP must be used. We shouldn't fallback on
             * finding default privileges.
             */
            ret = i;
            break;
        }
    }

    /* No rule matched */
    if (ret == -1) {
        if (pmp_hart_has_privs_default(env, addr, size, privs,
                                       allowed_privs, mode)) {
            ret = MAX_RISCV_PMPS;
        }
    }

    return ret;
}

/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
    target_ulong val)
{
    int i;
    uint8_t cfg_val;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);

    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

    for (i = 0; i < pmpcfg_nums; i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    }

    /* If PMP permission of any addr has been changed, flush TLB pages. */
    tlb_flush(env_cpu(env));
}

/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);

    for (i = 0; i < pmpcfg_nums; i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    }
    trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);

    return cfg_val;
}
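
/*
 * Note on pmpcfg_nums in the two functions above: riscv_cpu_mxl() is 1 for
 * RV32 and 2 for RV64, so 2 << mxl packs 4 cfg bytes per pmpcfg CSR on RV32
 * and 8 on RV64, matching the CSR layout in the privileged spec.
 */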

/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
    target_ulong val)
{
    trace_pmpaddr_csr_write(env->mhartid, addr_index, val);

    if (addr_index < MAX_RISCV_PMPS) {
        /*
         * In TOR mode, need to check the lock bit of the next pmp
         * (if there is a next).
         */
        if (addr_index + 1 < MAX_RISCV_PMPS) {
            uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;

            if (pmp_cfg & PMP_LOCK &&
                PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "ignoring pmpaddr write - pmpcfg + 1 locked\n");
                return;
            }
        }

        if (!pmp_is_locked(env, addr_index)) {
            env->pmp_state.pmp[addr_index].addr_reg = val;
            pmp_update_rule(env, addr_index);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}

/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    target_ulong val = 0;

    if (addr_index < MAX_RISCV_PMPS) {
        val = env->pmp_state.pmp[addr_index].addr_reg;
        trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
    }

    return val;
}

/*
 * Handle a write to a mseccfg CSR
 */
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
    int i;

    trace_mseccfg_csr_write(env->mhartid, val);

    /* RLB cannot be enabled if it's already 0 and if any regions are locked */
    if (!MSECCFG_RLB_ISSET(env)) {
        for (i = 0; i < MAX_RISCV_PMPS; i++) {
            if (pmp_is_locked(env, i)) {
                val &= ~MSECCFG_RLB;
                break;
            }
        }
    }

    /* Sticky bits */
    val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));

    env->mseccfg = val;
}

/*
 * Handle a read from a mseccfg CSR
 */
target_ulong mseccfg_csr_read(CPURISCVState *env)
{
    trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
    return env->mseccfg;
}
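
/*
 * mseccfg bits handled above: RLB (rule locking bypass), MML (machine mode
 * lockdown) and MMWP (machine mode whitelist policy).  As implemented in
 * mseccfg_csr_write(), MML and MMWP are sticky: once set by software they
 * cannot be cleared, and RLB cannot be newly set while any PMP entry is
 * locked.
 */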

/*
 * Calculate the TLB size if the start address or the end address of
 * the PMP entry falls within the TLB page.
 */
target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
    target_ulong tlb_sa, target_ulong tlb_ea)
{
    target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
    target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;

    if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
        return TARGET_PAGE_SIZE;
    } else {
        /*
         * At this point the PMP region only partially covers the TLB page,
         * so the largest size that fits within both TARGET_PAGE_SIZE and the
         * PMP region is less than TARGET_PAGE_SIZE.  Drop the size to 1 so
         * the result isn't cached in the TLB and is only used for a single
         * translation.
         */
        return 1;
    }
}

/*
 * Convert PMP privilege to TLB page privilege.
 */
int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
{
    int prot = 0;

    if (pmp_priv & PMP_READ) {
        prot |= PAGE_READ;
    }
    if (pmp_priv & PMP_WRITE) {
        prot |= PAGE_WRITE;
    }
    if (pmp_priv & PMP_EXEC) {
        prot |= PAGE_EXEC;
    }

    return prot;
}