Diffstat (limited to 'target/riscv/pmp.c')
-rw-r--r-- | target/riscv/pmp.c | 174
1 file changed, 100 insertions, 74 deletions
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index b0841d4..3540327 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -26,12 +26,22 @@
 #include "trace.h"
 #include "exec/cputlb.h"
 #include "exec/page-protection.h"
+#include "exec/target_page.h"
 
 static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
                           uint8_t val);
 static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
 
 /*
+ * Convert the PMP permissions to match the truth table in the Smepmp spec.
+ */
+static inline uint8_t pmp_get_smepmp_operation(uint8_t cfg)
+{
+    return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) |
+           (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2);
+}
+
+/*
  * Accessor method to extract address matching type 'a field' from cfg reg
  */
 static inline uint8_t pmp_get_a_field(uint8_t cfg)
@@ -45,21 +55,58 @@ static inline uint8_t pmp_get_a_field(uint8_t cfg)
  */
 static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
 {
-    /* mseccfg.RLB is set */
-    if (MSECCFG_RLB_ISSET(env)) {
-        return 0;
-    }
-
     if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
         return 1;
     }
 
-    /* Top PMP has no 'next' to check */
-    if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
+    return 0;
+}
+
+/*
+ * Check whether a PMP is locked for writing or not.
+ * (i.e. has LOCK flag and mseccfg.RLB is unset)
+ */
+static int pmp_is_readonly(CPURISCVState *env, uint32_t pmp_index)
+{
+    return pmp_is_locked(env, pmp_index) && !MSECCFG_RLB_ISSET(env);
+}
+
+/*
+ * Check whether `val` is an invalid Smepmp config value
+ */
+static int pmp_is_invalid_smepmp_cfg(CPURISCVState *env, uint8_t val)
+{
+    /* No check if mseccfg.MML is not set or if mseccfg.RLB is set */
+    if (!MSECCFG_MML_ISSET(env) || MSECCFG_RLB_ISSET(env)) {
         return 0;
     }
 
-    return 0;
+    /*
+     * Adding a rule with executable privileges that either is M-mode-only
+     * or a locked Shared-Region is not possible
+     */
+    switch (pmp_get_smepmp_operation(val)) {
+    case 0:
+    case 1:
+    case 2:
+    case 3:
+    case 4:
+    case 5:
+    case 6:
+    case 7:
+    case 8:
+    case 12:
+    case 14:
+    case 15:
+        return 0;
+    case 9:
+    case 10:
+    case 11:
+    case 13:
+        return 1;
+    default:
+        g_assert_not_reached();
+    }
 }
 
 /*
@@ -75,7 +122,9 @@ uint32_t pmp_get_num_rules(CPURISCVState *env)
  */
 static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
 {
-    if (pmp_index < MAX_RISCV_PMPS) {
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
+
+    if (pmp_index < pmp_regions) {
         return env->pmp_state.pmp[pmp_index].cfg_reg;
     }
 
@@ -89,46 +138,21 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
  */
 static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
 {
-    if (pmp_index < MAX_RISCV_PMPS) {
-        bool locked = true;
-
-        if (riscv_cpu_cfg(env)->ext_smepmp) {
-            /* mseccfg.RLB is set */
-            if (MSECCFG_RLB_ISSET(env)) {
-                locked = false;
-            }
-
-            /* mseccfg.MML is not set */
-            if (!MSECCFG_MML_ISSET(env) && !pmp_is_locked(env, pmp_index)) {
-                locked = false;
-            }
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
-            /* mseccfg.MML is set */
-            if (MSECCFG_MML_ISSET(env)) {
-                /* not adding execute bit */
-                if ((val & PMP_LOCK) != 0 && (val & PMP_EXEC) != PMP_EXEC) {
-                    locked = false;
-                }
-                /* shared region and not adding X bit */
-                if ((val & PMP_LOCK) != PMP_LOCK &&
-                    (val & 0x7) != (PMP_WRITE | PMP_EXEC)) {
-                    locked = false;
-                }
-            }
-        } else {
-            if (!pmp_is_locked(env, pmp_index)) {
-                locked = false;
-            }
+    if (pmp_index < pmp_regions) {
+        if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
+            /* no change */
+            return false;
         }
 
-        if (locked) {
-            qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
-        } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
-            /* If !mseccfg.MML then ignore writes with encoding RW=01 */
-            if ((val & PMP_WRITE) && !(val & PMP_READ) &&
-                !MSECCFG_MML_ISSET(env)) {
-                return false;
-            }
+        if (pmp_is_readonly(env, pmp_index)) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "ignoring pmpcfg write - read only\n");
+        } else if (pmp_is_invalid_smepmp_cfg(env, val)) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "ignoring pmpcfg write - invalid\n");
+        } else {
             env->pmp_state.pmp[pmp_index].cfg_reg = val;
             pmp_update_rule_addr(env, pmp_index);
             return true;
@@ -216,9 +240,10 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
 void pmp_update_rule_nums(CPURISCVState *env)
 {
     int i;
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
     env->pmp_state.num_rules = 0;
-    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+    for (i = 0; i < pmp_regions; i++) {
         const uint8_t a_field =
             pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
         if (PMP_AMATCH_OFF != a_field) {
@@ -312,6 +337,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
     int pmp_size = 0;
     hwaddr s = 0;
     hwaddr e = 0;
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
     /* Short cut if no rules */
     if (0 == pmp_get_num_rules(env)) {
@@ -336,7 +362,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
      * 1.10 draft priv spec states there is an implicit order
      * from low to high
      */
-    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+    for (i = 0; i < pmp_regions; i++) {
         s = pmp_is_in_range(env, i, addr);
         e = pmp_is_in_range(env, i, addr + pmp_size - 1);
 
@@ -352,16 +378,6 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
         const uint8_t a_field =
             pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
 
-        /*
-         * Convert the PMP permissions to match the truth table in the
-         * Smepmp spec.
-         */
-        const uint8_t smepmp_operation =
-            ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
-            ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
-            (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
-            ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
-
         if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
             /*
              * If the PMP entry is not off and the address is in range,
@@ -380,6 +396,9 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
                 /*
                  * If mseccfg.MML Bit set, do the enhanced pmp priv check
                  */
+                const uint8_t smepmp_operation =
+                    pmp_get_smepmp_operation(env->pmp_state.pmp[i].cfg_reg);
+
                 if (mode == PRV_M) {
                     switch (smepmp_operation) {
                     case 0:
@@ -514,35 +533,39 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
 {
     trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
     bool is_next_cfg_tor = false;
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
+
+    if (addr_index < pmp_regions) {
+        if (env->pmp_state.pmp[addr_index].addr_reg == val) {
+            /* no change */
+            return;
+        }
 
-    if (addr_index < MAX_RISCV_PMPS) {
         /*
          * In TOR mode, need to check the lock bit of the next pmp
          * (if there is a next).
         */
-        if (addr_index + 1 < MAX_RISCV_PMPS) {
+        if (addr_index + 1 < pmp_regions) {
            uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
            is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
 
-            if (pmp_is_locked(env, addr_index + 1) && is_next_cfg_tor) {
+            if (pmp_is_readonly(env, addr_index + 1) && is_next_cfg_tor) {
                 qemu_log_mask(LOG_GUEST_ERROR,
-                              "ignoring pmpaddr write - pmpcfg + 1 locked\n");
+                              "ignoring pmpaddr write - pmpcfg+1 read only\n");
                 return;
             }
         }
 
-        if (!pmp_is_locked(env, addr_index)) {
-            if (env->pmp_state.pmp[addr_index].addr_reg != val) {
-                env->pmp_state.pmp[addr_index].addr_reg = val;
-                pmp_update_rule_addr(env, addr_index);
-                if (is_next_cfg_tor) {
-                    pmp_update_rule_addr(env, addr_index + 1);
-                }
-                tlb_flush(env_cpu(env));
+        if (!pmp_is_readonly(env, addr_index)) {
+            env->pmp_state.pmp[addr_index].addr_reg = val;
+            pmp_update_rule_addr(env, addr_index);
+            if (is_next_cfg_tor) {
+                pmp_update_rule_addr(env, addr_index + 1);
             }
+            tlb_flush(env_cpu(env));
         } else {
             qemu_log_mask(LOG_GUEST_ERROR,
-                          "ignoring pmpaddr write - locked\n");
+                          "ignoring pmpaddr write - read only\n");
         }
     } else {
         qemu_log_mask(LOG_GUEST_ERROR,
@@ -557,8 +580,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
 target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
 {
     target_ulong val = 0;
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
-    if (addr_index < MAX_RISCV_PMPS) {
+    if (addr_index < pmp_regions) {
         val = env->pmp_state.pmp[addr_index].addr_reg;
         trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
     } else {
@@ -576,6 +600,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
 {
     int i;
     uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
     /* Update PMM field only if the value is valid according to Zjpm v1.0 */
     if (riscv_cpu_cfg(env)->ext_smmpm && riscv_cpu_mxl(env) == MXL_RV64 &&
@@ -587,7 +612,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
 
     /* RLB cannot be enabled if it's already 0 and if any regions are locked */
     if (!MSECCFG_RLB_ISSET(env)) {
-        for (i = 0; i < MAX_RISCV_PMPS; i++) {
+        for (i = 0; i < pmp_regions; i++) {
             if (pmp_is_locked(env, i)) {
                 val &= ~MSECCFG_RLB;
                 break;
@@ -643,6 +668,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
     hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
     hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
     int i;
+    uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
     /*
      * If PMP is not supported or there are no PMP rules, the TLB page will not
@@ -653,7 +679,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
         return TARGET_PAGE_SIZE;
     }
 
-    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+    for (i = 0; i < pmp_regions; i++) {
         if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
             continue;
         }
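
A note on the new helper: pmp_get_smepmp_operation() packs the L, R, W and X bits of a pmpcfg byte into a 4-bit index ((L << 3) | (R << 2) | (W << 1) | X), which is why the case labels in pmp_is_invalid_smepmp_cfg() line up with rows of the Smepmp truth table. Below is a minimal standalone sketch of that mapping; the PMP_* values mirror QEMU's target/riscv/pmp.h, and the sample configs are illustrative, not taken from the patch.

#include <assert.h>
#include <stdint.h>

/* pmpcfg permission bits: R/W/X in bits 0-2, L in bit 7 */
#define PMP_READ  (1 << 0)
#define PMP_WRITE (1 << 1)
#define PMP_EXEC  (1 << 2)
#define PMP_LOCK  (1 << 7)

/* Same bit shuffle as the helper added by this patch */
static inline uint8_t pmp_get_smepmp_operation(uint8_t cfg)
{
    return ((cfg & PMP_LOCK) >> 4) | ((cfg & PMP_READ) << 2) |
           (cfg & PMP_WRITE) | ((cfg & PMP_EXEC) >> 2);
}

int main(void)
{
    /* Unlocked R+W region: L=0 R=1 W=1 X=0 -> index 6, a valid encoding */
    assert(pmp_get_smepmp_operation(PMP_READ | PMP_WRITE) == 6);

    /*
     * Locked execute-only region: L=1 R=0 W=0 X=1 -> index 9, one of the
     * four encodings (9, 10, 11, 13) the patch rejects while mseccfg.MML
     * is set and mseccfg.RLB is clear.
     */
    assert(pmp_get_smepmp_operation(PMP_LOCK | PMP_EXEC) == 9);
    return 0;
}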