/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/exec-all.h"

static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
                          uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);

/*
 * Accessor to extract the address-matching type ('A' field) from a cfg reg
 */
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
    uint8_t a = cfg >> 3;
    return a & 0x3;
}
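
/*
 * For reference, the RISC-V privileged spec lays out each pmpcfg byte as:
 *   bit 0: R, bit 1: W, bit 2: X, bits 3-4: A (0 = OFF, 1 = TOR,
 *   2 = NA4, 3 = NAPOT), bits 5-6: reserved, bit 7: L
 * so, for example, pmp_get_a_field(0x9F) == PMP_AMATCH_NAPOT (3).
 */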

/*
 * Check whether a PMP is locked or not.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    /* Top PMP has no 'next' to check */
    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
        return 0;
    }

    return 0;
}

/*
 * Count the number of active rules.
 */
uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}

/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}


/*
 * Accessor to set the cfg reg for a specific PMP/HART
 * Performs bounds checks and honours the relevant lock bit.
 */
static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        bool locked = true;

        if (riscv_cpu_cfg(env)->epmp) {
            /* mseccfg.RLB is set */
            if (MSECCFG_RLB_ISSET(env)) {
                locked = false;
            }

            /* mseccfg.MML is not set */
            if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
                locked = false;
            }

            /* mseccfg.MML is set */
            if (MSECCFG_MML_ISSET(env)) {
                /* not adding execute bit */
                if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
                    locked = false;
                }
                /* shared region and not adding X bit */
                if ((val & PMP_LOCK) != PMP_LOCK &&
                    (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
                    locked = false;
                }
            }
        } else {
            if (!pmp_is_locked(env, pmp_index)) {
                locked = false;
            }
        }

        if (locked) {
            qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
        } else {
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule(env, pmp_index);
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - out of bounds\n");
    }
}

static void pmp_decode_napot(target_ulong a, target_ulong *sa,
                             target_ulong *ea)
{
    /*
     * aaaa...aaa0   8-byte NAPOT range
     * aaaa...aa01   16-byte NAPOT range
     * aaaa...a011   32-byte NAPOT range
     * ...
     * aa01...1111   2^XLEN-byte NAPOT range
     * a011...1111   2^(XLEN+1)-byte NAPOT range
     * 0111...1111   2^(XLEN+2)-byte NAPOT range
     * 1111...1111   Reserved
     */
    a = (a << 2) | 0x3;
    *sa = a & (a + 1);
    *ea = a | (a + 1);
}
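
/*
 * Worked example: pmpaddr = 0x9 (0b1001) has one trailing set bit, so it
 * encodes a 2^(1+3) = 16-byte region.  Decoding gives
 * a = (0x9 << 2) | 0x3 = 0b100111, hence sa = a & (a + 1) = 0x20 and
 * ea = a | (a + 1) = 0x2F, i.e. bytes 32..47.
 */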

void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
{
    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    target_ulong sa = 0u;
    target_ulong ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        if (sa > ea) {
            sa = ea = 0u;
        }
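        /*
         * Worked example: with pmpaddr0 = 0x100 and pmpaddr1 = 0x200 in
         * TOR mode, entry 1 covers the byte range [0x400, 0x7FF].
         */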
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u;
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;
}

void pmp_update_rule_nums(CPURISCVState *env)
{
    int i;

    env->pmp_state.num_rules = 0;
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}

/*
 * Convert cfg/addr reg values here into simple 'sa' (start address) and
 * 'ea' (end address) values.
 * This function is called relatively infrequently whereas the check that
 * an address is within a pmp rule is called often, so optimise that one.
 */
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index)
{
    pmp_update_rule_addr(env, pmp_index);
    pmp_update_rule_nums(env);
}

static int pmp_is_in_range(CPURISCVState *env, int pmp_index,
                           target_ulong addr)
{
    int result = 0;

    if ((addr >= env->pmp_state.addr[pmp_index].sa) &&
        (addr <= env->pmp_state.addr[pmp_index].ea)) {
        result = 1;
    } else {
        result = 0;
    }

    return result;
}

/*
 * Check if the address has required RWX privs when no PMP entry is matched.
 */
static bool pmp_hart_has_privs_default(CPURISCVState *env, pmp_priv_t privs,
                                       pmp_priv_t *allowed_privs,
                                       target_ulong mode)
{
    bool ret;

    if (MSECCFG_MMWP_ISSET(env)) {
        /*
         * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
         * so we default to deny all, even for M-mode.
         */
        *allowed_privs = 0;
        return false;
    } else if (MSECCFG_MML_ISSET(env)) {
        /*
         * The Machine Mode Lockdown (mseccfg.MML) bit is set
         * so we can only execute code in M-mode with an applicable
         * rule. Other modes are disabled.
         */
        if (mode == PRV_M && !(privs & PMP_EXEC)) {
            ret = true;
            *allowed_privs = PMP_READ | PMP_WRITE;
        } else {
            ret = false;
            *allowed_privs = 0;
        }

        return ret;
    }

    if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
        /*
         * Privileged spec v1.10 states if HW doesn't implement any PMP entry
         * or no PMP entry matches an M-Mode access, the access succeeds.
         */
        ret = true;
        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
    } else {
        /*
         * Other modes are not allowed to succeed if they don't match a rule,
         * but there are rules. We've checked for no rule earlier in this
         * function.
         */
        ret = false;
        *allowed_privs = 0;
    }

    return ret;
}


/*
 * Public Interface
 */

/*
 * Check if the address has required RWX privs to complete desired operation
 * Return true if a pmp rule matches or the default matches
 * Return false if no match
 */
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
                        target_ulong size, pmp_priv_t privs,
                        pmp_priv_t *allowed_privs, target_ulong mode)
{
    int i = 0;
    bool ret = false;
    int pmp_size = 0;
    target_ulong s = 0;
    target_ulong e = 0;

    /* Shortcut if no rules */
    if (0 == pmp_get_num_rules(env)) {
        return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
    }

    if (size == 0) {
        if (riscv_cpu_cfg(env)->mmu) {
            /*
             * If size is unknown (0), assume that all bytes
             * from addr to the end of the page will be accessed.
             */
            pmp_size = -(addr | TARGET_PAGE_MASK);
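            /*
             * Worked example: with 4 KiB pages, TARGET_PAGE_MASK is
             * ~0xFFF, so addr = 0x80001234 gives pmp_size =
             * -(0x80001234 | ~0xFFF) = 0x1000 - 0x234 = 0xDCC bytes.
             */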
        } else {
            pmp_size = sizeof(target_ulong);
        }
    } else {
        pmp_size = size;
    }

    /*
     * 1.10 draft priv spec states there is an implicit order
     * from low to high
     */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + pmp_size - 1);

        /* partially inside */
        if ((s + e) == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "pmp violation - access is partially inside\n");
            ret = false;
            break;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);

        /*
         * Convert the PMP permissions to match the truth table in the
         * ePMP spec.
         */
        const uint8_t epmp_operation =
            ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
            (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
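        /*
         * The resulting 4-bit index packs the entry's L, R, W and X bits
         * as bits 3..0; e.g. a locked read/execute entry (L=1, R=1, W=0,
         * X=1) yields epmp_operation == 0b1101 == 13.
         */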

        if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
            /*
             * If the PMP entry is not off and the address is in range,
             * do the priv check
             */
            if (!MSECCFG_MML_ISSET(env)) {
                /*
                 * If mseccfg.MML Bit is not set, do pmp priv check
                 * This will always apply to regular PMP.
                 */
                *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                    *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
                }
            } else {
                /*
                 * If mseccfg.MML Bit set, do the enhanced pmp priv check
                 */
                if (mode == PRV_M) {
                    switch (epmp_operation) {
                    case 0:
                    case 1:
                    case 4:
                    case 5:
                    case 6:
                    case 7:
                    case 8:
                        *allowed_privs = 0;
                        break;
                    case 2:
                    case 3:
                    case 14:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 9:
                    case 10:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 11:
                    case 13:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 12:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                } else {
                    switch (epmp_operation) {
                    case 0:
                    case 8:
                    case 9:
                    case 12:
                    case 13:
                    case 14:
                        *allowed_privs = 0;
                        break;
                    case 1:
                    case 10:
                    case 11:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 2:
                    case 4:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    case 3:
                    case 6:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 5:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 7:
                        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
            }

            /*
             * If a matching address range was found, the protection bits
             * defined by the PMP entry must be used; we shouldn't fall back
             * to the default privileges.
             */
            ret = true;
            break;
        }
    }

    /* No rule matched */
    if (!ret) {
        ret = pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
    }

    return ret;
}

/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
                      target_ulong val)
{
    int i;
    uint8_t cfg_val;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
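    /* i.e. 4 pmpcfg fields per CSR on RV32 (mxl = 1), 8 on RV64 (mxl = 2) */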

    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

    for (i = 0; i < pmpcfg_nums; i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    }

    /* If PMP permission of any addr has been changed, flush TLB pages. */
    tlb_flush(env_cpu(env));
}


/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);

    for (i = 0; i < pmpcfg_nums; i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    }
    trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);

    return cfg_val;
}


/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
                       target_ulong val)
{
    trace_pmpaddr_csr_write(env->mhartid, addr_index, val);

    if (addr_index < MAX_RISCV_PMPS) {
        /*
         * In TOR mode, need to check the lock bit of the next pmp
         * (if there is a next).
         */
        if (addr_index + 1 < MAX_RISCV_PMPS) {
            uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;

            if (pmp_cfg & PMP_LOCK &&
                PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "ignoring pmpaddr write - pmpcfg + 1 locked\n");
                return;
            }
        }

        if (!pmp_is_locked(env, addr_index)) {
            env->pmp_state.pmp[addr_index].addr_reg = val;
            pmp_update_rule(env, addr_index);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}


/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    target_ulong val = 0;

    if (addr_index < MAX_RISCV_PMPS) {
        val = env->pmp_state.pmp[addr_index].addr_reg;
        trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
    }

    return val;
}

/*
 * Handle a write to a mseccfg CSR
 */
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
    int i;

    trace_mseccfg_csr_write(env->mhartid, val);

    /* RLB cannot be set if it is currently clear and any region is locked */
    if (!MSECCFG_RLB_ISSET(env)) {
        for (i = 0; i < MAX_RISCV_PMPS; i++) {
            if (pmp_is_locked(env, i)) {
                val &= ~MSECCFG_RLB;
                break;
            }
        }
    }

    if (riscv_cpu_cfg(env)->epmp) {
        /* Sticky bits */
        val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
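        /* i.e. once set, MMWP and MML cannot be cleared by a later write */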
    } else {
        val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
    }

    env->mseccfg = val;
}

/*
 * Handle a read from a mseccfg CSR
 */
target_ulong mseccfg_csr_read(CPURISCVState *env)
{
    trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
    return env->mseccfg;
}

/*
 * Calculate the TLB size.
 * It's possible that PMP regions cover only part of the TLB page, and
 * this may split the page into regions with different permissions.
 * For example if PMP0 is (0x80000008~0x8000000F, R) and PMP1 is (0x80000000
 * ~0x80000FFF, RWX), then region 0x80000008~0x8000000F has R permission, and
 * the other regions in this page have RWX permissions.
 * A write access to 0x80000000 will match PMP1. However we cannot cache the
 * translation result in the TLB since this will make the write access to
 * 0x80000008 bypass the check of PMP0.
 * To avoid this we return a size of 1 (which means no caching) if the PMP
 * region covers only part of the TLB page.
 */
target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr)
{
    target_ulong pmp_sa;
    target_ulong pmp_ea;
    target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
    target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
    int i;

    /*
     * If PMP is not supported or there are no PMP rules, the TLB page will
     * not be split into regions with different permissions by PMP, so we
     * set the size to TARGET_PAGE_SIZE.
     */
    if (!riscv_cpu_cfg(env)->pmp || !pmp_get_num_rules(env)) {
        return TARGET_PAGE_SIZE;
    }

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
            continue;
        }

        pmp_sa = env->pmp_state.addr[i].sa;
        pmp_ea = env->pmp_state.addr[i].ea;

        /*
         * Only the first PMP entry that covers (all or part of) the TLB
         * page really matters:
         * If it covers the whole TLB page, set the size to TARGET_PAGE_SIZE,
         * since the following PMP entries have lower priority and will not
         * affect the permissions of the page.
         * If it covers only part of the TLB page, set the size to 1 since
         * the allowed permissions of the region may be different from other
         * regions of the page.
         */
        if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
            return TARGET_PAGE_SIZE;
        } else if ((pmp_sa >= tlb_sa && pmp_sa <= tlb_ea) ||
                   (pmp_ea >= tlb_sa && pmp_ea <= tlb_ea)) {
            return 1;
        }
    }

    /*
     * If no PMP entry matches the TLB page, the TLB page will also not be
     * split into regions with different permissions by PMP, so we set the
     * size to TARGET_PAGE_SIZE.
     */
    return TARGET_PAGE_SIZE;
}

/*
 * Convert PMP privilege to TLB page privilege.
 */
int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
{
    int prot = 0;

    if (pmp_priv & PMP_READ) {
        prot |= PAGE_READ;
    }
    if (pmp_priv & PMP_WRITE) {
        prot |= PAGE_WRITE;
    }
    if (pmp_priv & PMP_EXEC) {
        prot |= PAGE_EXEC;
    }

    return prot;
}