* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-/*
- * PMP (Physical Memory Protection) is as-of-yet unused and needs testing.
- */
-
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
return 0;
}
- /* In TOR mode, need to check the lock bit of the next pmp
- * (if there is a next)
- */
- const uint8_t a_field =
- pmp_get_a_field(env->pmp_state.pmp[pmp_index + 1].cfg_reg);
- if ((env->pmp_state.pmp[pmp_index + 1u].cfg_reg & PMP_LOCK) &&
- (PMP_AMATCH_TOR == a_field)) {
- return 1;
- }
-
return 0;
}
static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
if (pmp_index < MAX_RISCV_PMPS) {
- if (!pmp_is_locked(env, pmp_index)) {
- env->pmp_state.pmp[pmp_index].cfg_reg = val;
- pmp_update_rule(env, pmp_index);
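+ /* Assume the write is rejected until one of the checks below permits it */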
+ bool locked = true;
+
+ if (riscv_cpu_cfg(env)->epmp) {
+ /* mseccfg.RLB is set */
+ if (MSECCFG_RLB_ISSET(env)) {
+ locked = false;
+ }
+
+ /* mseccfg.MML is not set */
+ if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
+ locked = false;
+ }
+
+ /* mseccfg.MML is set */
+ if (MSECCFG_MML_ISSET(env)) {
+ /* Adding a locked rule without the execute bit is allowed */
+ if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
+ locked = false;
+ }
+ /*
+ * Adding an unlocked rule is allowed unless it encodes a
+ * shared region (W and X set without R)
+ */
+ if ((val & PMP_LOCK) != PMP_LOCK &&
+ (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
+ locked = false;
+ }
+ }
} else {
+ if (!pmp_is_locked(env, pmp_index)) {
+ locked = false;
+ }
+ }
+
+ if (locked) {
qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
+ } else {
+ env->pmp_state.pmp[pmp_index].cfg_reg = val;
+ pmp_update_rule(env, pmp_index);
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
0111...1111 2^(XLEN+2)-byte NAPOT range
1111...1111 Reserved
*/
- if (a == -1) {
- *sa = 0u;
- *ea = -1;
- return;
- } else {
- target_ulong t1 = ctz64(~a);
- target_ulong base = (a & ~(((target_ulong)1 << t1) - 1)) << 2;
- target_ulong range = ((target_ulong)1 << (t1 + 3)) - 1;
- *sa = base;
- *ea = base + range;
- }
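+ /*
+ * NAPOT decode: (a << 2) | 0x3 re-appends the two implicit low
+ * address bits, so the trailing ones encode the region size;
+ * a & (a + 1) clears them to give the region base, while
+ * a | (a + 1) sets them to give the inclusive region end.
+ */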
+ a = (a << 2) | 0x3;
+ *sa = a & (a + 1);
+ *ea = a | (a + 1);
}
void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
case PMP_AMATCH_TOR:
sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
ea = (this_addr << 2) - 1u;
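+ /*
+ * A TOR entry with pmpaddr[i - 1] >= pmpaddr[i] matches no
+ * addresses per the spec, so collapse the range
+ */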
+ if (sa > ea) {
+ sa = ea = 0u;
+ }
break;
case PMP_AMATCH_NA4:
{
bool ret;
- if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) {
+ if (riscv_cpu_cfg(env)->epmp) {
+ if (MSECCFG_MMWP_ISSET(env)) {
+ /*
+ * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
+ * so we default to deny all, even for M-mode.
+ */
+ *allowed_privs = 0;
+ return false;
+ } else if (MSECCFG_MML_ISSET(env)) {
+ /*
+ * The Machine Mode Lockdown (mseccfg.MML) bit is set
+ * so we can only execute code in M-mode with an applicable
+ * rule. Other modes are disabled.
+ */
+ if (mode == PRV_M && !(privs & PMP_EXEC)) {
+ ret = true;
+ *allowed_privs = PMP_READ | PMP_WRITE;
+ } else {
+ ret = false;
+ *allowed_privs = 0;
+ }
+
+ return ret;
+ }
+ }
+
+ if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
/*
* Privileged spec v1.10 states if HW doesn't implement any PMP entry
* or no PMP entry matches an M-Mode access, the access succeeds.
/*
* Check if the address has required RWX privs to complete desired operation
+ * Return the PMP rule index if a pmp rule matches
+ * Return MAX_RISCV_PMPS if the default privileges apply
+ * Return a negative value if there is no match
*/
-bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
+int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
target_ulong mode)
{
/* Short cut if no rules */
if (0 == pmp_get_num_rules(env)) {
- return pmp_hart_has_privs_default(env, addr, size, privs,
- allowed_privs, mode);
+ if (pmp_hart_has_privs_default(env, addr, size, privs,
+ allowed_privs, mode)) {
+ ret = MAX_RISCV_PMPS;
+ }
}
if (size == 0) {
- if (riscv_feature(env, RISCV_FEATURE_MMU)) {
+ if (riscv_cpu_cfg(env)->mmu) {
/*
* If size is unknown (0), assume that all bytes
* from addr to the end of the page will be accessed.
if ((s + e) == 1) {
qemu_log_mask(LOG_GUEST_ERROR,
"pmp violation - access is partially inside\n");
- ret = 0;
+ ret = -1;
break;
}
pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
/*
- * If the PMP entry is not off and the address is in range, do the priv
- * check
+ * Convert the PMP permissions to match the truth table in the
+ * ePMP spec: pack the L, R, W and X bits into bits 3..0.
*/
+ const uint8_t epmp_operation =
+ ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
+ ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
+ (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
+ ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
+
if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
- *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
- if ((mode != PRV_M) || pmp_is_locked(env, i)) {
- *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
+ /*
+ * If the PMP entry is not off and the address is in range,
+ * do the priv check
+ */
+ if (!MSECCFG_MML_ISSET(env)) {
+ /*
+ * If the mseccfg.MML bit is not set, do the regular pmp priv
+ * check; this is always the case for regular (non-ePMP) PMP.
+ */
+ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+ if ((mode != PRV_M) || pmp_is_locked(env, i)) {
+ *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
+ }
+ } else {
+ /*
+ * If the mseccfg.MML bit is set, do the enhanced pmp priv check
+ */
+ if (mode == PRV_M) {
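+ /*
+ * The cases below index the ePMP truth table with
+ * epmp_operation (L:R:W:X) for M-mode accesses
+ */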
+ switch (epmp_operation) {
+ case 0:
+ case 1:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ *allowed_privs = 0;
+ break;
+ case 2:
+ case 3:
+ case 14:
+ *allowed_privs = PMP_READ | PMP_WRITE;
+ break;
+ case 9:
+ case 10:
+ *allowed_privs = PMP_EXEC;
+ break;
+ case 11:
+ case 13:
+ *allowed_privs = PMP_READ | PMP_EXEC;
+ break;
+ case 12:
+ case 15:
+ *allowed_privs = PMP_READ;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
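+ /* Truth table for S/U-mode accesses, indexed the same way */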
+ switch (epmp_operation) {
+ case 0:
+ case 8:
+ case 9:
+ case 12:
+ case 13:
+ case 14:
+ *allowed_privs = 0;
+ break;
+ case 1:
+ case 10:
+ case 11:
+ *allowed_privs = PMP_EXEC;
+ break;
+ case 2:
+ case 4:
+ case 15:
+ *allowed_privs = PMP_READ;
+ break;
+ case 3:
+ case 6:
+ *allowed_privs = PMP_READ | PMP_WRITE;
+ break;
+ case 5:
+ *allowed_privs = PMP_READ | PMP_EXEC;
+ break;
+ case 7:
+ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
}
- ret = ((privs & *allowed_privs) == privs);
+ /*
+ * If a matching address range was found, the protection bits
+ * defined by that PMP entry must be used; we must not fall
+ * back to the default privileges.
+ */
+ ret = i;
break;
}
}
/* No rule matched */
if (ret == -1) {
- return pmp_hart_has_privs_default(env, addr, size, privs,
- allowed_privs, mode);
+ if (pmp_hart_has_privs_default(env, addr, size, privs,
+ allowed_privs, mode)) {
+ ret = MAX_RISCV_PMPS;
+ }
}
- return ret == 1 ? true : false;
+ return ret;
}
/*
- * Handle a write to a pmpcfg CSP
+ * Handle a write to a pmpcfg CSR
*/
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
target_ulong val)
{
int i;
uint8_t cfg_val;
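+ /* Each pmpcfg CSR holds 4 entries on RV32 and 8 on RV64 */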
+ int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
trace_pmpcfg_csr_write(env->mhartid, reg_index, val);
- if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "ignoring pmpcfg write - incorrect address\n");
- return;
- }
-
- for (i = 0; i < sizeof(target_ulong); i++) {
+ for (i = 0; i < pmpcfg_nums; i++) {
cfg_val = (val >> 8 * i) & 0xff;
pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
}
/*
- * Handle a read from a pmpcfg CSP
+ * Handle a read from a pmpcfg CSR
*/
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
int i;
target_ulong cfg_val = 0;
target_ulong val = 0;
+ int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
- for (i = 0; i < sizeof(target_ulong); i++) {
+ for (i = 0; i < pmpcfg_nums; i++) {
val = pmp_read_cfg(env, (reg_index * 4) + i);
cfg_val |= (val << (i * 8));
}
/*
- * Handle a write to a pmpaddr CSP
+ * Handle a write to a pmpaddr CSR
*/
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
target_ulong val)
{
trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
+
if (addr_index < MAX_RISCV_PMPS) {
+ /*
+ * In TOR mode, we need to check the lock bit of the next pmp
+ * entry (if there is one).
+ */
+ if (addr_index + 1 < MAX_RISCV_PMPS) {
+ uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
+
+ if (pmp_cfg & PMP_LOCK &&
+ PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "ignoring pmpaddr write - pmpcfg + 1 locked\n");
+ return;
+ }
+ }
+
if (!pmp_is_locked(env, addr_index)) {
env->pmp_state.pmp[addr_index].addr_reg = val;
pmp_update_rule(env, addr_index);
/*
- * Handle a read from a pmpaddr CSP
+ * Handle a read from a pmpaddr CSR
*/
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
}
/*
- * Calculate the TLB size if the start address or the end address of
- * PMP entry is presented in thie TLB page.
+ * Handle a write to a mseccfg CSR
*/
-static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
- target_ulong tlb_sa, target_ulong tlb_ea)
+void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
{
- target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
- target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;
+ int i;
- if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
- return pmp_ea - pmp_sa + 1;
- }
+ trace_mseccfg_csr_write(env->mhartid, val);
- if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
- return tlb_ea - pmp_sa + 1;
+ /*
+ * RLB cannot be set if it is currently clear and any PMP entry
+ * is locked
+ */
+ if (!MSECCFG_RLB_ISSET(env)) {
+ for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ if (pmp_is_locked(env, i)) {
+ val &= ~MSECCFG_RLB;
+ break;
+ }
+ }
}
- if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) {
- return pmp_ea - tlb_sa + 1;
- }
+ /* Sticky bits: MML and MMWP cannot be cleared once set */
+ val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
- return 0;
+ env->mseccfg = val;
}
/*
- * Check is there a PMP entry which range covers this page. If so,
- * try to find the minimum granularity for the TLB size.
+ * Handle a read from a mseccfg CSR
*/
-bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
- target_ulong *tlb_size)
+target_ulong mseccfg_csr_read(CPURISCVState *env)
{
- int i;
- target_ulong val;
- target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);
+ trace_mseccfg_csr_read(env->mhartid, env->mseccfg);
+ return env->mseccfg;
+}
- for (i = 0; i < MAX_RISCV_PMPS; i++) {
- val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea);
- if (val) {
- if (*tlb_size == 0 || *tlb_size > val) {
- *tlb_size = val;
- }
- }
- }
+/*
+ * Calculate the TLB size if the start address or the end address of
+ * the PMP entry is within the TLB page.
+ */
+target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
+ target_ulong tlb_sa, target_ulong tlb_ea)
+{
+ target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
+ target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;
- if (*tlb_size != 0) {
- return true;
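+ /* The PMP region covers the whole page: the full page size applies */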
+ if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
+ return TARGET_PAGE_SIZE;
+ } else {
+ /*
+ * At this point we have a TLB size that is the smallest possible
+ * size that fits within a TARGET_PAGE_SIZE and the PMP region.
+ *
+ * If the size is less than TARGET_PAGE_SIZE we drop the size to 1.
+ * This means the result isn't cached in the TLB and is only used
+ * for a single translation.
+ */
+ return 1;
}
-
- return false;
}
/*