#include "trace.h"
#include "exec/exec-all.h"
-static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
- uint8_t val);
+static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
+ uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
-static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);
/*
* Accessor method to extract address matching type 'a field' from cfg reg
*/
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
+ /* mseccfg.RLB is set */
+ if (MSECCFG_RLB_ISSET(env)) {
+ return 0;
+ }
if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
return 1;
* Accessor to set the cfg reg for a specific PMP/HART
* Bounds checks and relevant lock bit.
*/
-static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
+static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
if (pmp_index < MAX_RISCV_PMPS) {
bool locked = true;
- if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
+ if (riscv_cpu_cfg(env)->ext_smepmp) {
/* mseccfg.RLB is set */
if (MSECCFG_RLB_ISSET(env)) {
locked = false;
if (locked) {
qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
- } else {
+ } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
+ /* If !mseccfg.MML then ignore writes with encoding RW=01 */
+ if ((val & PMP_WRITE) && !(val & PMP_READ) &&
+ !MSECCFG_MML_ISSET(env)) {
+ return false;
+ }
env->pmp_state.pmp[pmp_index].cfg_reg = val;
- pmp_update_rule(env, pmp_index);
+ pmp_update_rule_addr(env, pmp_index);
+ return true;
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
"ignoring pmpcfg write - out of bounds\n");
}
+
+ return false;
}
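+/*
+ * Clear the PMP_LOCK flag and the address-matching (A) field of every
+ * implemented PMP entry, leaving all entries unlocked and disabled.
+ */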
-static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea)
+void pmp_unlock_entries(CPURISCVState *env)
{
- /*
- aaaa...aaa0 8-byte NAPOT range
- aaaa...aa01 16-byte NAPOT range
- aaaa...a011 32-byte NAPOT range
- ...
- aa01...1111 2^XLEN-byte NAPOT range
- a011...1111 2^(XLEN+1)-byte NAPOT range
- 0111...1111 2^(XLEN+2)-byte NAPOT range
- 1111...1111 Reserved
- */
- if (a == -1) {
- *sa = 0u;
- *ea = -1;
- return;
- } else {
- target_ulong t1 = ctz64(~a);
- target_ulong base = (a & ~(((target_ulong)1 << t1) - 1)) << 2;
- target_ulong range = ((target_ulong)1 << (t1 + 3)) - 1;
- *sa = base;
- *ea = base + range;
+ uint32_t pmp_num = pmp_get_num_rules(env);
+ int i;
+
+ for (i = 0; i < pmp_num; i++) {
+ env->pmp_state.pmp[i].cfg_reg &= ~(PMP_LOCK | PMP_AMATCH);
}
}
+static void pmp_decode_napot(hwaddr a, hwaddr *sa, hwaddr *ea)
+{
+ /*
+ * aaaa...aaa0 8-byte NAPOT range
+ * aaaa...aa01 16-byte NAPOT range
+ * aaaa...a011 32-byte NAPOT range
+ * ...
+ * aa01...1111 2^XLEN-byte NAPOT range
+ * a011...1111 2^(XLEN+1)-byte NAPOT range
+ * 0111...1111 2^(XLEN+2)-byte NAPOT range
+ * 1111...1111 Reserved
+ */
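+    /*
+     * Re-appending the two implicit low address bits as ones lets the
+     * carry of (a + 1) locate the trailing run of ones: a & (a + 1)
+     * clears the run to give the range base, while a | (a + 1) fills
+     * it to give the inclusive range top.
+     */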
+ a = (a << 2) | 0x3;
+ *sa = a & (a + 1);
+ *ea = a | (a + 1);
+}
+
void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
{
uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
target_ulong prev_addr = 0u;
- target_ulong sa = 0u;
- target_ulong ea = 0u;
+ hwaddr sa = 0u;
+ hwaddr ea = 0u;
if (pmp_index >= 1u) {
prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
case PMP_AMATCH_TOR:
sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
ea = (this_addr << 2) - 1u;
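+        /* A TOR range whose top is below its base matches no addresses */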
+ if (sa > ea) {
+ sa = ea = 0u;
+ }
break;
case PMP_AMATCH_NA4:
}
}
-/* Convert cfg/addr reg values here into simple 'sa' --> start address and 'ea'
- * end address values.
- * This function is called relatively infrequently whereas the check that
- * an address is within a pmp rule is called often, so optimise that one
- */
-static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index)
-{
- pmp_update_rule_addr(env, pmp_index);
- pmp_update_rule_nums(env);
-}
-
-static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
+static int pmp_is_in_range(CPURISCVState *env, int pmp_index, hwaddr addr)
{
int result = 0;
- if ((addr >= env->pmp_state.addr[pmp_index].sa)
- && (addr <= env->pmp_state.addr[pmp_index].ea)) {
+ if ((addr >= env->pmp_state.addr[pmp_index].sa) &&
+ (addr <= env->pmp_state.addr[pmp_index].ea)) {
result = 1;
} else {
result = 0;
/*
* Check if the address has required RWX privs when no PMP entry is matched.
*/
-static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
- target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
- target_ulong mode)
+static bool pmp_hart_has_privs_default(CPURISCVState *env, pmp_priv_t privs,
+ pmp_priv_t *allowed_privs,
+ target_ulong mode)
{
bool ret;
- if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
- if (MSECCFG_MMWP_ISSET(env)) {
- /*
- * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
- * so we default to deny all, even for M-mode.
- */
+ if (MSECCFG_MMWP_ISSET(env)) {
+ /*
+ * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
+ * so we default to deny all, even for M-mode.
+ */
+ *allowed_privs = 0;
+ return false;
+ } else if (MSECCFG_MML_ISSET(env)) {
+ /*
+ * The Machine Mode Lockdown (mseccfg.MML) bit is set
+ * so we can only execute code in M-mode with an applicable
+ * rule. Other modes are disabled.
+ */
+ if (mode == PRV_M && !(privs & PMP_EXEC)) {
+ ret = true;
+ *allowed_privs = PMP_READ | PMP_WRITE;
+ } else {
+ ret = false;
*allowed_privs = 0;
- return false;
- } else if (MSECCFG_MML_ISSET(env)) {
- /*
- * The Machine Mode Lockdown (mseccfg.MML) bit is set
- * so we can only execute code in M-mode with an applicable
- * rule. Other modes are disabled.
- */
- if (mode == PRV_M && !(privs & PMP_EXEC)) {
- ret = true;
- *allowed_privs = PMP_READ | PMP_WRITE;
- } else {
- ret = false;
- *allowed_privs = 0;
- }
-
- return ret;
}
+
+ return ret;
}
- if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) {
+ if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
/*
* Privileged spec v1.10 states if HW doesn't implement any PMP entry
* or no PMP entry matches an M-Mode access, the access succeeds.
/*
* Check if the address has required RWX privs to complete desired operation
+ * Return true if the matching PMP rule or the default policy grants the
+ * requested privileges, false otherwise
*/
-bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
- target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs,
- target_ulong mode)
+bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
+ target_ulong size, pmp_priv_t privs,
+ pmp_priv_t *allowed_privs, target_ulong mode)
{
int i = 0;
- int ret = -1;
int pmp_size = 0;
- target_ulong s = 0;
- target_ulong e = 0;
+ hwaddr s = 0;
+ hwaddr e = 0;
/* Short cut if no rules */
if (0 == pmp_get_num_rules(env)) {
- return pmp_hart_has_privs_default(env, addr, size, privs,
- allowed_privs, mode);
+ return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
}
if (size == 0) {
- if (riscv_feature(env, RISCV_FEATURE_MMU)) {
+ if (riscv_cpu_cfg(env)->mmu) {
/*
* If size is unknown (0), assume that all bytes
* from addr to the end of the page will be accessed.
pmp_size = size;
}
- /* 1.10 draft priv spec states there is an implicit order
- from low to high */
+ /*
+ * 1.10 draft priv spec states there is an implicit order
+ * from low to high
+ */
for (i = 0; i < MAX_RISCV_PMPS; i++) {
s = pmp_is_in_range(env, i, addr);
e = pmp_is_in_range(env, i, addr + pmp_size - 1);
if ((s + e) == 1) {
qemu_log_mask(LOG_GUEST_ERROR,
"pmp violation - access is partially inside\n");
- ret = 0;
- break;
+ *allowed_privs = 0;
+ return false;
}
/* fully inside */
/*
* Convert the PMP permissions to match the truth table in the
- * ePMP spec.
+ * Smepmp spec.
*/
- const uint8_t epmp_operation =
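+            /* Pack cfg L/R/W/X into a 4-bit Smepmp truth-table index */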
+ const uint8_t smepmp_operation =
((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
(env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
* If mseccfg.MML Bit set, do the enhanced pmp priv check
*/
if (mode == PRV_M) {
- switch (epmp_operation) {
+ switch (smepmp_operation) {
case 0:
case 1:
case 4:
g_assert_not_reached();
}
} else {
- switch (epmp_operation) {
+ switch (smepmp_operation) {
case 0:
case 8:
case 9:
}
}
- ret = ((privs & *allowed_privs) == privs);
- break;
+ /*
+             * If a matching address range was found, the protection bits
+             * defined by that PMP entry must be used. We shouldn't fall
+             * back to the default privileges.
+ */
+ return (privs & *allowed_privs) == privs;
}
}
/* No rule matched */
- if (ret == -1) {
- return pmp_hart_has_privs_default(env, addr, size, privs,
- allowed_privs, mode);
- }
-
- return ret == 1 ? true : false;
+ return pmp_hart_has_privs_default(env, privs, allowed_privs, mode);
}
/*
* Handle a write to a pmpcfg CSR
*/
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
- target_ulong val)
+ target_ulong val)
{
int i;
uint8_t cfg_val;
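+    /* Each pmpcfg CSR packs 4 entry configs on RV32 and 8 on RV64 */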
+ int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
+ bool modified = false;
trace_pmpcfg_csr_write(env->mhartid, reg_index, val);
- if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "ignoring pmpcfg write - incorrect address\n");
- return;
- }
-
- for (i = 0; i < sizeof(target_ulong); i++) {
+ for (i = 0; i < pmpcfg_nums; i++) {
cfg_val = (val >> 8 * i) & 0xff;
- pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
+ modified |= pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
}
/* If PMP permission of any addr has been changed, flush TLB pages. */
- tlb_flush(env_cpu(env));
+ if (modified) {
+ pmp_update_rule_nums(env);
+ tlb_flush(env_cpu(env));
+ }
}
int i;
target_ulong cfg_val = 0;
target_ulong val = 0;
+ int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
- for (i = 0; i < sizeof(target_ulong); i++) {
+ for (i = 0; i < pmpcfg_nums; i++) {
val = pmp_read_cfg(env, (reg_index * 4) + i);
cfg_val |= (val << (i * 8));
}
* Handle a write to a pmpaddr CSR
*/
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
- target_ulong val)
+ target_ulong val)
{
trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
+ bool is_next_cfg_tor = false;
if (addr_index < MAX_RISCV_PMPS) {
/*
*/
if (addr_index + 1 < MAX_RISCV_PMPS) {
uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
+ is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
- if (pmp_cfg & PMP_LOCK &&
- PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg)) {
+ if (pmp_cfg & PMP_LOCK && is_next_cfg_tor) {
qemu_log_mask(LOG_GUEST_ERROR,
"ignoring pmpaddr write - pmpcfg + 1 locked\n");
return;
}
if (!pmp_is_locked(env, addr_index)) {
- env->pmp_state.pmp[addr_index].addr_reg = val;
- pmp_update_rule(env, addr_index);
+ if (env->pmp_state.pmp[addr_index].addr_reg != val) {
+ env->pmp_state.pmp[addr_index].addr_reg = val;
+ pmp_update_rule_addr(env, addr_index);
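+                /* A TOR rule in the next entry is based on this address */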
+ if (is_next_cfg_tor) {
+ pmp_update_rule_addr(env, addr_index + 1);
+ }
+ tlb_flush(env_cpu(env));
+ }
} else {
qemu_log_mask(LOG_GUEST_ERROR,
"ignoring pmpaddr write - locked\n");
}
}
- /* Sticky bits */
- val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
+ if (riscv_cpu_cfg(env)->ext_smepmp) {
+ /* Sticky bits */
+ val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
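+        /* MML and MMWP affect the default privileges, so flush on change */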
+ if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
+ tlb_flush(env_cpu(env));
+ }
+ } else {
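+        /* Without Smepmp the mseccfg PMP bits are hardwired to zero */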
+ val &= ~(MSECCFG_MMWP | MSECCFG_MML | MSECCFG_RLB);
+ }
env->mseccfg = val;
}
}
/*
- * Calculate the TLB size if the start address or the end address of
- * PMP entry is presented in the TLB page.
+ * Calculate the TLB size.
+ * It's possible that PMP regions cover only part of the TLB page, and
+ * this may split the page into regions with different permissions.
+ * For example, if PMP0 is (0x80000008~0x8000000F, R) and PMP1 is (0x80000000
+ * ~0x80000FFF, RWX), then region 0x80000008~0x8000000F has R permission while
+ * the other regions in this page have RWX permissions.
+ * A write access to 0x80000000 will match PMP1. However, we cannot cache the
+ * translation result in the TLB since this would make the write access to
+ * 0x80000008 bypass the check of PMP0.
+ * To avoid this we return a size of 1 (which means no caching) if a PMP
+ * region covers only part of the TLB page.
*/
-static target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
- target_ulong tlb_sa, target_ulong tlb_ea)
+target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
{
- target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
- target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;
-
- if (pmp_sa >= tlb_sa && pmp_ea <= tlb_ea) {
- return pmp_ea - pmp_sa + 1;
- }
-
- if (pmp_sa >= tlb_sa && pmp_sa <= tlb_ea && pmp_ea >= tlb_ea) {
- return tlb_ea - pmp_sa + 1;
- }
+ hwaddr pmp_sa;
+ hwaddr pmp_ea;
+ hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
+ hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
+ int i;
- if (pmp_ea <= tlb_ea && pmp_ea >= tlb_sa && pmp_sa <= tlb_sa) {
- return pmp_ea - tlb_sa + 1;
+ /*
+ * If PMP is not supported or there are no PMP rules, the TLB page will not
+     * be split into regions with different permissions by PMP, so we set the
+ * size to TARGET_PAGE_SIZE.
+ */
+ if (!riscv_cpu_cfg(env)->pmp || !pmp_get_num_rules(env)) {
+ return TARGET_PAGE_SIZE;
}
- return 0;
-}
-
-/*
- * Check is there a PMP entry which range covers this page. If so,
- * try to find the minimum granularity for the TLB size.
- */
-bool pmp_is_range_in_tlb(CPURISCVState *env, hwaddr tlb_sa,
- target_ulong *tlb_size)
-{
- int i;
- target_ulong val;
- target_ulong tlb_ea = (tlb_sa + TARGET_PAGE_SIZE - 1);
-
for (i = 0; i < MAX_RISCV_PMPS; i++) {
- val = pmp_get_tlb_size(env, i, tlb_sa, tlb_ea);
- if (val) {
- if (*tlb_size == 0 || *tlb_size > val) {
- *tlb_size = val;
- }
+ if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
+ continue;
}
- }
- if (*tlb_size != 0) {
- return true;
+ pmp_sa = env->pmp_state.addr[i].sa;
+ pmp_ea = env->pmp_state.addr[i].ea;
+
+ /*
+         * Only the first PMP entry that covers the TLB page (in whole or
+         * in part) really matters:
+ * If it covers the whole TLB page, set the size to TARGET_PAGE_SIZE,
+ * since the following PMP entries have lower priority and will not
+ * affect the permissions of the page.
+         * If it covers only part of the TLB page, set the size to 1 since
+         * the allowed permissions of that region may differ from those of
+         * the rest of the page.
+ */
+ if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
+ return TARGET_PAGE_SIZE;
+ } else if ((pmp_sa >= tlb_sa && pmp_sa <= tlb_ea) ||
+ (pmp_ea >= tlb_sa && pmp_ea <= tlb_ea)) {
+ return 1;
+ }
}
- return false;
+ /*
+ * If no PMP entry matches the TLB page, the TLB page will also not be
+     * split into regions with different permissions by PMP, so we set the size
+ * to TARGET_PAGE_SIZE.
+ */
+ return TARGET_PAGE_SIZE;
}
/*