/*
 * QEMU RISC-V PMP (Physical Memory Protection)
 *
 * Author: Daire McNamara, daire.mcnamara@emdalo.com
 *         Ivan Griffin, ivan.griffin@emdalo.com
 *
 * This provides a RISC-V Physical Memory Protection implementation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "trace.h"
#include "exec/exec-all.h"

static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
                          uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);

/*
 * Accessor method to extract address matching type 'a field' from cfg reg
 */
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
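    /*
     * The A field sits in cfg bits [4:3]: 0 = OFF, 1 = TOR, 2 = NA4,
     * 3 = NAPOT (the PMP_AMATCH_* values used throughout this file).
     */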
    uint8_t a = cfg >> 3;
    return a & 0x3;
}

/*
 * Check whether a PMP is locked or not.
 */
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
    /* mseccfg.RLB is set */
    if (MSECCFG_RLB_ISSET(env)) {
        return 0;
    }

    if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
        return 1;
    }

    /* Top PMP has no 'next' to check */
    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
        return 0;
    }

    return 0;
}

/*
 * Count the number of active rules.
 */
uint32_t pmp_get_num_rules(CPURISCVState *env)
{
    return env->pmp_state.num_rules;
}

/*
 * Accessor to get the cfg reg for a specific PMP/HART
 */
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        return env->pmp_state.pmp[pmp_index].cfg_reg;
    }

    return 0;
}

/*
 * Accessor to set the cfg reg for a specific PMP/HART
 * Bounds checks and relevant lock bit.
 */
static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
    if (pmp_index < MAX_RISCV_PMPS) {
        bool locked = true;

        if (riscv_cpu_cfg(env)->epmp) {
            /* mseccfg.RLB is set */
            if (MSECCFG_RLB_ISSET(env)) {
                locked = false;
            }

            /* mseccfg.MML is not set */
            if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
                locked = false;
            }

            /* mseccfg.MML is set */
            if (MSECCFG_MML_ISSET(env)) {
                /* not adding execute bit */
                if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
                    locked = false;
                }
                /* shared region and not adding X bit */
                if ((val & PMP_LOCK) != PMP_LOCK &&
                    (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
                    locked = false;
                }
            }
        } else {
            if (!pmp_is_locked(env, pmp_index)) {
                locked = false;
            }
        }

        if (locked) {
            qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
        } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
            env->pmp_state.pmp[pmp_index].cfg_reg = val;
            pmp_update_rule_addr(env, pmp_index);
            return true;
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpcfg write - out of bounds\n");
    }

    return false;
}

static void pmp_decode_napot(target_ulong a, target_ulong *sa,
                             target_ulong *ea)
{
    /*
     * aaaa...aaa0   8-byte NAPOT range
     * aaaa...aa01   16-byte NAPOT range
     * aaaa...a011   32-byte NAPOT range
     * ...
     * aa01...1111   2^XLEN-byte NAPOT range
     * a011...1111   2^(XLEN+1)-byte NAPOT range
     * 0111...1111   2^(XLEN+2)-byte NAPOT range
     * 1111...1111   Reserved
     */
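    /*
     * Worked example: pmpaddr = 0b...0101 has a single trailing one, so
     * it encodes a 16-byte range: a becomes (0b0101 << 2) | 0x3 =
     * 0b010111, giving sa = a & (a + 1) = 0b010000 and
     * ea = a | (a + 1) = 0b011111, i.e. the range [0x10, 0x1f].
     */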
    a = (a << 2) | 0x3;
    *sa = a & (a + 1);
    *ea = a | (a + 1);
}

void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
{
    uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
    target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
    target_ulong prev_addr = 0u;
    target_ulong sa = 0u;
    target_ulong ea = 0u;

    if (pmp_index >= 1u) {
        prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
    }

    switch (pmp_get_a_field(this_cfg)) {
    case PMP_AMATCH_OFF:
        sa = 0u;
        ea = -1;
        break;

    case PMP_AMATCH_TOR:
        sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (this_addr << 2) - 1u;
        if (sa > ea) {
            sa = ea = 0u;
        }
        break;

    case PMP_AMATCH_NA4:
        sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
        ea = (sa + 4u) - 1u;
        break;

    case PMP_AMATCH_NAPOT:
        pmp_decode_napot(this_addr, &sa, &ea);
        break;

    default:
        sa = 0u;
        ea = 0u;
        break;
    }

    env->pmp_state.addr[pmp_index].sa = sa;
    env->pmp_state.addr[pmp_index].ea = ea;
}

void pmp_update_rule_nums(CPURISCVState *env)
{
    int i;

    env->pmp_state.num_rules = 0;
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
        if (PMP_AMATCH_OFF != a_field) {
            env->pmp_state.num_rules++;
        }
    }
}

static int pmp_is_in_range(CPURISCVState *env, int pmp_index,
                           target_ulong addr)
{
    int result = 0;

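    /* sa and ea are the inclusive bounds computed by pmp_update_rule_addr() */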
    if ((addr >= env->pmp_state.addr[pmp_index].sa) &&
        (addr <= env->pmp_state.addr[pmp_index].ea)) {
        result = 1;
    } else {
        result = 0;
    }

    return result;
}

/*
 * Check if the address has required RWX privs when no PMP entry is matched.
 */
static bool pmp_hart_has_privs_default(CPURISCVState *env, pmp_priv_t privs,
                                       pmp_priv_t *allowed_privs,
                                       target_ulong mode)
{
    bool ret;

    if (MSECCFG_MMWP_ISSET(env)) {
        /*
         * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
         * so we default to deny all, even for M-mode.
         */
        *allowed_privs = 0;
        return false;
    } else if (MSECCFG_MML_ISSET(env)) {
        /*
         * The Machine Mode Lockdown (mseccfg.MML) bit is set
         * so we can only execute code in M-mode with an applicable
         * rule. Other modes are disabled.
         */
        if (mode == PRV_M && !(privs & PMP_EXEC)) {
            ret = true;
            *allowed_privs = PMP_READ | PMP_WRITE;
        } else {
            ret = false;
            *allowed_privs = 0;
        }

        return ret;
    }

    if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
        /*
         * Privileged spec v1.10 states if HW doesn't implement any PMP entry
         * or no PMP entry matches an M-Mode access, the access succeeds.
         */
        ret = true;
        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
    } else {
        /*
         * Other modes are not allowed to succeed if they don't match a rule,
         * but there are rules. We've checked for no rule earlier in this
         * function.
         */
        ret = false;
        *allowed_privs = 0;
    }

    return ret;
}

/*
 * Check if the address has required RWX privs to complete desired operation
 * Return true if a pmp rule match or default match
 * Return false if no match
 */
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
                        target_ulong size, pmp_priv_t privs,
                        pmp_priv_t *allowed_privs, target_ulong mode)
{
    int i = 0;
    int pmp_size = 0;
    target_ulong s = 0;
    target_ulong e = 0;

    /* Short cut if no rules */
    if (0 == pmp_get_num_rules(env)) {
        return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
    }

    if (size == 0) {
        if (riscv_cpu_cfg(env)->mmu) {
            /*
             * If size is unknown (0), assume that all bytes
             * from addr to the end of the page will be accessed.
             */
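            /*
             * TARGET_PAGE_MASK is sign-extended (e.g. ~0xfff), so
             * addr | TARGET_PAGE_MASK is the negative in-page offset and
             * negating it gives the bytes left to the page end: with
             * 4 KiB pages, an addr ending in 0xffe yields pmp_size = 2.
             */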
            pmp_size = -(addr | TARGET_PAGE_MASK);
        } else {
            pmp_size = sizeof(target_ulong);
        }
    } else {
        pmp_size = size;
    }

    /*
     * 1.10 draft priv spec states there is an implicit order
     * from low to high
     */
    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        s = pmp_is_in_range(env, i, addr);
        e = pmp_is_in_range(env, i, addr + pmp_size - 1);
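
        /*
         * s and e flag whether the first and last byte of the access hit
         * entry i; s + e == 1 means the access straddles the region
         * boundary, while s + e == 2 means it is fully contained.
         */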
        /* partially inside */
        if ((s + e) == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "pmp violation - access is partially inside\n");
            *allowed_privs = 0;
            return false;
        }

        /* fully inside */
        const uint8_t a_field =
            pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);

        /*
         * Convert the PMP permissions to match the truth table in the
         * ePMP spec.
         */
        const uint8_t epmp_operation =
            ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
            (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
            ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
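
        /*
         * epmp_operation packs L/R/W/X into a 4-bit index (L = bit 3,
         * R = bit 2, W = bit 1, X = bit 0), matching the row numbering
         * of the Smepmp truth table; e.g. a locked execute-only entry
         * (L | X) yields 0b1001 = 9.
         */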

        if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
            /*
             * If the PMP entry is not off and the address is in range,
             * do the priv check
             */
            if (!MSECCFG_MML_ISSET(env)) {
                /*
                 * If mseccfg.MML Bit is not set, do pmp priv check
                 * This will always apply to regular PMP.
                 */
                *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                if ((mode != PRV_M) || pmp_is_locked(env, i)) {
                    *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
                }
            } else {
                /*
                 * If mseccfg.MML Bit set, do the enhanced pmp priv check
                 */
                if (mode == PRV_M) {
                    switch (epmp_operation) {
                    case 0:
                    case 1:
                    case 4:
                    case 5:
                    case 6:
                    case 7:
                    case 8:
                        *allowed_privs = 0;
                        break;
                    case 2:
                    case 3:
                    case 14:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 9:
                    case 10:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 11:
                    case 13:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 12:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                } else {
                    switch (epmp_operation) {
                    case 0:
                    case 8:
                    case 9:
                    case 12:
                    case 13:
                    case 14:
                        *allowed_privs = 0;
                        break;
                    case 1:
                    case 10:
                    case 11:
                        *allowed_privs = PMP_EXEC;
                        break;
                    case 2:
                    case 4:
                    case 15:
                        *allowed_privs = PMP_READ;
                        break;
                    case 3:
                    case 6:
                        *allowed_privs = PMP_READ | PMP_WRITE;
                        break;
                    case 5:
                        *allowed_privs = PMP_READ | PMP_EXEC;
                        break;
                    case 7:
                        *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
                        break;
                    default:
                        g_assert_not_reached();
                    }
                }
            }

            /*
             * If matching address range was found, the protection bits
             * defined with PMP must be used. We shouldn't fall back on
             * finding default privileges.
             */
            return (privs & *allowed_privs) == privs;
        }
    }

    /* No rule matched */
    return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
}

/*
 * Handle a write to a pmpcfg CSR
 */
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
                      target_ulong val)
{
    int i;
    uint8_t cfg_val;
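    /* 4 cfg bytes per pmpcfg CSR on RV32 (mxl = 1), 8 on RV64 (mxl = 2) */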
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
    bool modified = false;

    trace_pmpcfg_csr_write(env->mhartid, reg_index, val);

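    /*
     * Each pmpcfg CSR covers four PMP entries on RV32 and eight on RV64
     * (where only even reg_index values exist), so entry numbering is
     * reg_index * 4 + i in both cases.
     */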
    for (i = 0; i < pmpcfg_nums; i++) {
        cfg_val = (val >> 8 * i) & 0xff;
        modified |= pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
    }

    /* If PMP permission of any addr has been changed, flush TLB pages. */
    if (modified) {
        pmp_update_rule_nums(env);
        tlb_flush(env_cpu(env));
    }
}

/*
 * Handle a read from a pmpcfg CSR
 */
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
    int i;
    target_ulong cfg_val = 0;
    target_ulong val = 0;
    int pmpcfg_nums = 2 << riscv_cpu_mxl(env);

    for (i = 0; i < pmpcfg_nums; i++) {
        val = pmp_read_cfg(env, (reg_index * 4) + i);
        cfg_val |= (val << (i * 8));
    }
    trace_pmpcfg_csr_read(env->mhartid, reg_index, cfg_val);

    return cfg_val;
}

/*
 * Handle a write to a pmpaddr CSR
 */
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
                       target_ulong val)
{
    trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
    bool is_next_cfg_tor = false;

    if (addr_index < MAX_RISCV_PMPS) {
        /*
         * In TOR mode, need to check the lock bit of the next pmp
         * (if there is a next).
         */
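        /*
         * This is because pmpaddr[i] is the TOR base address of entry
         * i + 1, so a locked TOR entry i + 1 also locks this register.
         */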
        if (addr_index + 1 < MAX_RISCV_PMPS) {
            uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
            is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);

            if (pmp_cfg & PMP_LOCK && is_next_cfg_tor) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "ignoring pmpaddr write - pmpcfg + 1 locked\n");
                return;
            }
        }

        if (!pmp_is_locked(env, addr_index)) {
            if (env->pmp_state.pmp[addr_index].addr_reg != val) {
                env->pmp_state.pmp[addr_index].addr_reg = val;
                pmp_update_rule_addr(env, addr_index);
                if (is_next_cfg_tor) {
                    pmp_update_rule_addr(env, addr_index + 1);
                }
                tlb_flush(env_cpu(env));
            }
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "ignoring pmpaddr write - locked\n");
        }
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr write - out of bounds\n");
    }
}

/*
 * Handle a read from a pmpaddr CSR
 */
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
    target_ulong val = 0;

    if (addr_index < MAX_RISCV_PMPS) {
        val = env->pmp_state.pmp[addr_index].addr_reg;
        trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ignoring pmpaddr read - out of bounds\n");
    }

    return val;
}

/*
 * Handle a write to a mseccfg CSR
 */
void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
    int i;

    trace_mseccfg_csr_write(env->mhartid, val);

    /* RLB cannot be enabled if it's already 0 and if any regions are locked */
    if (!MSECCFG_RLB_ISSET(env)) {
        for (i = 0; i < MAX_RISCV_PMPS; i++) {
            if (pmp_is_locked(env, i)) {
                val &= ~MSECCFG_RLB;
                break;
            }
        }
    }

    if (riscv_cpu_cfg(env)->epmp) {
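        /*
         * Sticky bits: once MML or MMWP is set it stays set until reset,
         * so OR any previously set bits back into the written value.
         */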
        val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
        if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
            tlb_flush(env_cpu(env));
        }
    } else {
        val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
    }

    env->mseccfg = val;
}

/*
 * Handle a read from a mseccfg CSR
 */
target_ulong mseccfg_csr_read(CPURISCVState *env)
{
    trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
    return env->mseccfg;
}

/*
 * Calculate the TLB size.
 * It's possible that PMP regions only cover part of the TLB page, and
 * this may split the page into regions with different permissions.
 * For example if PMP0 is (0x80000008~0x8000000F, R) and PMP1 is (0x80000000
 * ~0x80000FFF, RWX), then region 0x80000008~0x8000000F has R permission, and
 * the other regions in this page have RWX permissions.
 * A write access to 0x80000000 will match PMP1. However we cannot cache the
 * translation result in the TLB since this will make the write access to
 * 0x80000008 bypass the check of PMP0.
 * To avoid this we return a size of 1 (which means no caching) if the PMP
 * region only covers part of the TLB page.
 */
target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr)
{
    target_ulong pmp_sa;
    target_ulong pmp_ea;
    target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
    target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
    int i;

    /*
     * If PMP is not supported or there are no PMP rules, the TLB page will
     * not be split into regions with different permissions by PMP so we set
     * the size to TARGET_PAGE_SIZE.
     */
    if (!riscv_cpu_cfg(env)->pmp || !pmp_get_num_rules(env)) {
        return TARGET_PAGE_SIZE;
    }

    for (i = 0; i < MAX_RISCV_PMPS; i++) {
        if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
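            /* Skip disabled entries; they never constrain the page. */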
            continue;
        }

        pmp_sa = env->pmp_state.addr[i].sa;
        pmp_ea = env->pmp_state.addr[i].ea;

        /*
         * Only the first PMP entry that covers (whole or partial of) the TLB
         * page really matters:
         * If it covers the whole TLB page, set the size to TARGET_PAGE_SIZE,
         * since the following PMP entries have lower priority and will not
         * affect the permissions of the page.
         * If it only covers part of the TLB page, set the size to 1 since
         * the allowed permissions of the region may be different from other
         * regions of the page.
         */
        if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
            return TARGET_PAGE_SIZE;
        } else if ((pmp_sa >= tlb_sa && pmp_sa <= tlb_ea) ||
                   (pmp_ea >= tlb_sa && pmp_ea <= tlb_ea)) {
            return 1;
        }
    }

    /*
     * If no PMP entry matches the TLB page, the TLB page will also not be
     * split into regions with different permissions by PMP so we set the size
     * to TARGET_PAGE_SIZE.
     */
    return TARGET_PAGE_SIZE;
}

/*
 * Convert PMP privilege to TLB page privilege.
 */
int pmp_priv_to_page_prot(pmp_priv_t pmp_priv)
{
    int prot = 0;

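    /* PAGE_READ/PAGE_WRITE/PAGE_EXEC are QEMU's generic TLB protection flags */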
    if (pmp_priv & PMP_READ) {
        prot |= PAGE_READ;
    }
    if (pmp_priv & PMP_WRITE) {
        prot |= PAGE_WRITE;
    }
    if (pmp_priv & PMP_EXEC) {
        prot |= PAGE_EXEC;
    }

    return prot;
}