*/
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
+ /* mseccfg.RLB is set */
+ if (MSECCFG_RLB_ISSET(env)) {
+ return 0;
+ }
if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
return 1;
if (pmp_index < MAX_RISCV_PMPS) {
bool locked = true;
- if (riscv_cpu_cfg(env)->epmp) {
+ if (riscv_cpu_cfg(env)->ext_smepmp) {
/* mseccfg.RLB is set */
if (MSECCFG_RLB_ISSET(env)) {
locked = false;
if (locked) {
qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
} else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
+ /* If !mseccfg.MML then ignore writes with encoding RW=01 */
+ if ((val & PMP_WRITE) && !(val & PMP_READ) &&
+ !MSECCFG_MML_ISSET(env)) {
+ return false;
+ }
env->pmp_state.pmp[pmp_index].cfg_reg = val;
pmp_update_rule_addr(env, pmp_index);
return true;
return false;
}
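For reference, a minimal standalone sketch of the reserved-encoding rule enforced by the new RW=01 guard above; it is not part of the patch, and the PMP_READ/PMP_WRITE bit positions are assumed to match QEMU's pmp.h:

#include <stdbool.h>
#include <stdint.h>

#define PMP_READ  (1 << 0)   /* assumed: same bit layout as pmp.h */
#define PMP_WRITE (1 << 1)

/*
 * Mirrors the check added in pmp_write_cfg() above: while mseccfg.MML is
 * clear, a pmpcfg value with W=1 and R=0 is a reserved encoding, so the
 * caller ignores the write.
 */
static bool pmpcfg_write_is_reserved(uint8_t val, bool mml_set)
{
    return (val & PMP_WRITE) && !(val & PMP_READ) && !mml_set;
}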
-static void pmp_decode_napot(target_ulong a, target_ulong *sa,
- target_ulong *ea)
+void pmp_unlock_entries(CPURISCVState *env)
+{
+ uint32_t pmp_num = pmp_get_num_rules(env);
+ int i;
+
+ for (i = 0; i < pmp_num; i++) {
+ env->pmp_state.pmp[i].cfg_reg &= ~(PMP_LOCK | PMP_AMATCH);
+ }
+}
+
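As a reading aid for the new helper above, a small self-contained sketch (not from the patch) of what clearing PMP_LOCK and PMP_AMATCH does to one configuration byte; the bit positions (L in bit 7, A in bits 4:3) are assumed from QEMU's pmp.h:

#include <stdint.h>
#include <stdio.h>

#define PMP_LOCK   0x80   /* assumed: cfg bit 7 */
#define PMP_AMATCH 0x18   /* assumed: cfg bits 4:3, the address-matching mode */

int main(void)
{
    uint8_t cfg = 0x9f;               /* locked, A=NAPOT, X/W/R set */

    /* Same mask as pmp_unlock_entries(): drop the lock and force A to OFF. */
    cfg &= ~(PMP_LOCK | PMP_AMATCH);

    printf("cfg = 0x%02x\n", cfg);    /* prints 0x07: R/W/X kept, entry disabled */
    return 0;
}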
+static void pmp_decode_napot(hwaddr a, hwaddr *sa, hwaddr *ea)
{
/*
* aaaa...aaa0 8-byte NAPOT range
uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
target_ulong prev_addr = 0u;
- target_ulong sa = 0u;
- target_ulong ea = 0u;
+ hwaddr sa = 0u;
+ hwaddr ea = 0u;
if (pmp_index >= 1u) {
prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
}
}
-static int pmp_is_in_range(CPURISCVState *env, int pmp_index,
- target_ulong addr)
+static int pmp_is_in_range(CPURISCVState *env, int pmp_index, hwaddr addr)
{
int result = 0;
* Return true if a pmp rule matches or a default match applies
* Return false if no match
*/
-bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
+bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
target_ulong size, pmp_priv_t privs,
pmp_priv_t *allowed_privs, target_ulong mode)
{
int i = 0;
int pmp_size = 0;
- target_ulong s = 0;
- target_ulong e = 0;
+ hwaddr s = 0;
+ hwaddr e = 0;
/* Short cut if no rules */
if (0 == pmp_get_num_rules(env)) {
/*
* Convert the PMP permissions to match the truth table in the
- * ePMP spec.
+ * Smepmp spec.
*/
- const uint8_t epmp_operation =
+ const uint8_t smepmp_operation =
((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
(env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
* If the mseccfg.MML bit is set, do the enhanced pmp priv check
*/
if (mode == PRV_M) {
- switch (epmp_operation) {
+ switch (smepmp_operation) {
case 0:
case 1:
case 4:
g_assert_not_reached();
}
} else {
- switch (epmp_operation) {
+ switch (smepmp_operation) {
case 0:
case 8:
case 9:
}
}
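To make the bit folding above easier to follow, a hedged standalone sketch (not from the patch) of how a pmpcfg byte is packed into a single truth-table index; the PMP_* bit positions, and the elided fourth term handling PMP_EXEC, are assumptions based on QEMU's pmp.h rather than visible in this excerpt:

#include <stdint.h>
#include <stdio.h>

#define PMP_READ  (1 << 0)   /* assumed pmp.h bit layout */
#define PMP_WRITE (1 << 1)
#define PMP_EXEC  (1 << 2)
#define PMP_LOCK  (1 << 7)

/* Folds a pmpcfg byte into an L|R|W|X index, as smepmp_operation does above. */
static uint8_t smepmp_index(uint8_t cfg)
{
    return ((cfg & PMP_LOCK) >> 4) |    /* L -> bit 3 */
           ((cfg & PMP_READ) << 2) |    /* R -> bit 2 */
           (cfg & PMP_WRITE)       |    /* W -> bit 1 */
           ((cfg & PMP_EXEC) >> 2);     /* X -> bit 0 (assumed elided term) */
}

int main(void)
{
    /* A locked, read-only entry folds to index 0b1100 == 12. */
    printf("%u\n", smepmp_index(PMP_LOCK | PMP_READ));
    return 0;
}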
- if (riscv_cpu_cfg(env)->epmp) {
+ if (riscv_cpu_cfg(env)->ext_smepmp) {
/* Sticky bits */
val |= (env->mseccfg & (MSECCFG_MMWP | MSECCFG_MML));
if ((val ^ env->mseccfg) & (MSECCFG_MMWP | MSECCFG_MML)) {
* To avoid this we return a size of 1 (which means no caching) if the PMP
* region only partially covers the TLB page.
*/
-target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr)
+target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
{
- target_ulong pmp_sa;
- target_ulong pmp_ea;
- target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
- target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
+ hwaddr pmp_sa;
+ hwaddr pmp_ea;
+ hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
+ hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
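+ /*
+  * e.g. with a 4 KiB target page and addr = 0x80001234:
+  * tlb_sa = 0x80001000, tlb_ea = 0x80001fff.
+  */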
int i;
/*