author     Andrew Waterman <andrew@sifive.com>        2020-05-10 00:13:38 -0700
committer  Chih-Min Chao <chihmin.chao@sifive.com>    2020-05-19 19:57:06 -0700
commit     65cfc4bca7db24a387fdc8c8f04d77c41617dc82 (patch)
tree       80dc439d2b774a9feff267c473e9df4b46bd5ac4
parent     1028871cf96008744744aa39d88c496a71792125 (diff)
Implement configurable PMP count
If no PMPs exist, simply deny access to the PMP registers. If some but not all PMPs exist, the unimplemented ones are hardwired to 0.
-rw-r--r--  riscv/processor.cc | 22
1 file changed, 16 insertions(+), 6 deletions(-)
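
For illustration, here is a minimal standalone C++ sketch (not spike code) of the CSR behaviour the commit message describes: a toy register file with a configurable number of implemented PMP address entries, where accesses are denied outright when none exist and writes to unimplemented entries are dropped so they read back as zero. The toy_pmp_file type, its members, and the 16-entry maximum are hypothetical names and values used only for this example.

// Hypothetical toy model, not spike code: illustrates a configurable PMP
// count under the assumption of a 16-entry architectural maximum.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct toy_pmp_file {
  static constexpr std::size_t max_pmp = 16;   // architectural maximum modeled here

  explicit toy_pmp_file(std::size_t n) : n_pmp(n), pmpaddr(max_pmp, 0) {}

  std::size_t n_pmp;                 // number of PMP entries that actually exist
  std::vector<uint64_t> pmpaddr;     // unimplemented entries stay hardwired to 0

  // Returns false ("access denied") when no PMPs exist at all; otherwise the
  // access is allowed, but writes at or beyond n_pmp are dropped.
  bool write_pmpaddr(std::size_t i, uint64_t val) {
    if (n_pmp == 0)
      return false;                  // no PMPs: deny access to the registers
    if (i < n_pmp)
      pmpaddr[i] = val;              // implemented entry: the write takes effect
    return true;                     // unimplemented entry: silently ignored
  }
};

int main() {
  toy_pmp_file pmp(4);                 // pretend only 4 of the 16 entries exist
  pmp.write_pmpaddr(0, ~uint64_t(0));  // takes effect
  pmp.write_pmpaddr(8, ~uint64_t(0));  // ignored; entry 8 still reads as 0
  std::printf("pmpaddr0=%llx pmpaddr8=%llx\n",
              (unsigned long long)pmp.pmpaddr[0],
              (unsigned long long)pmp.pmpaddr[8]);
  return 0;
}

In spike itself the same effect is achieved by the n_pmp checks added in the diff below.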
diff --git a/riscv/processor.cc b/riscv/processor.cc
index 9caee5c..b3b53ee 100644
--- a/riscv/processor.cc
+++ b/riscv/processor.cc
@@ -468,10 +468,12 @@ void processor_t::reset()
set_csr(CSR_MSTATUS, state.mstatus);
VU.reset();
- // For backwards compatibility with software that is unaware of PMP,
- // initialize PMP to permit unprivileged access to all of memory.
- set_csr(CSR_PMPADDR0, ~reg_t(0));
- set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ if (n_pmp > 0) {
+ // For backwards compatibility with software that is unaware of PMP,
+ // initialize PMP to permit unprivileged access to all of memory.
+ set_csr(CSR_PMPADDR0, ~reg_t(0));
+ set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ }
if (ext)
ext->reset(); // reset the extension
@@ -680,19 +682,27 @@ void processor_t::set_csr(int which, reg_t val)
reg_t all_ints = delegable_ints | MIP_MSIP | MIP_MTIP;
if (which >= CSR_PMPADDR0 && which < CSR_PMPADDR0 + state.max_pmp) {
+ // If no PMPs are configured, disallow access to all. Otherwise, allow
+ // access to all, but unimplemented ones are hardwired to zero.
+ if (n_pmp == 0)
+ return;
+
size_t i = which - CSR_PMPADDR0;
bool locked = state.pmpcfg[i] & PMP_L;
bool next_locked = i+1 < state.max_pmp && (state.pmpcfg[i+1] & PMP_L);
bool next_tor = i+1 < state.max_pmp && (state.pmpcfg[i+1] & PMP_A) == PMP_TOR;
- if (!locked && !(next_locked && next_tor))
+ if (i < n_pmp && !locked && !(next_locked && next_tor))
state.pmpaddr[i] = val & ((reg_t(1) << (MAX_PADDR_BITS - PMP_SHIFT)) - 1);
mmu->flush_tlb();
}
if (which >= CSR_PMPCFG0 && which < CSR_PMPCFG0 + state.max_pmp / 4) {
+ if (n_pmp == 0)
+ return;
+
for (size_t i0 = (which - CSR_PMPCFG0) * 4, i = i0; i < i0 + xlen / 8; i++) {
- if (!(state.pmpcfg[i] & PMP_L)) {
+ if (i < n_pmp && !(state.pmpcfg[i] & PMP_L)) {
uint8_t cfg = (val >> (8 * (i - i0))) & (PMP_R | PMP_W | PMP_X | PMP_A | PMP_L);
cfg &= ~PMP_W | ((cfg & PMP_R) ? PMP_W : 0); // Disallow R=0 W=1
state.pmpcfg[i] = cfg;
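
For comparison, a standalone sketch of the pmpcfg write path in the second hunk, under the simplifying assumptions of RV64 (eight config bytes per pmpcfg CSR), a single pmpcfg0 register, and toy PMP_* bit values; N_PMP, write_pmpcfg0, and the global pmpcfg array are names invented for this example. It shows both the new bound on the number of implemented entries and the pre-existing rule that drops W when R is clear.

// Hypothetical standalone model, not spike code: mirrors the gating applied in
// processor_t::set_csr for pmpcfg0 on RV64, with only 4 of 16 entries implemented.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static const uint8_t PMP_R = 0x01, PMP_W = 0x02, PMP_X = 0x04;
static const uint8_t PMP_A = 0x18, PMP_L = 0x80;

static const std::size_t N_PMP = 4;   // assumed number of implemented entries
static uint8_t pmpcfg[16];            // unimplemented entries remain zero

// Write pmpcfg0: on RV64 the CSR packs one config byte per PMP entry 0..7.
void write_pmpcfg0(uint64_t val) {
  if (N_PMP == 0)
    return;                           // no PMPs: the CSR is inaccessible
  for (std::size_t i = 0; i < 8; i++) {
    if (i < N_PMP && !(pmpcfg[i] & PMP_L)) {          // skip locked/absent entries
      uint8_t cfg = (val >> (8 * i)) & (PMP_R | PMP_W | PMP_X | PMP_A | PMP_L);
      cfg &= ~PMP_W | ((cfg & PMP_R) ? PMP_W : 0);    // R=0, W=1 is reserved: drop W
      pmpcfg[i] = cfg;
    }
  }
}

int main() {
  write_pmpcfg0(0x0000070000000203ull);  // entry 0: R|W, entry 1: W only, entry 5: R|W|X
  for (std::size_t i = 0; i < 8; i++)
    std::printf("pmpcfg[%zu] = 0x%02x\n", i, pmpcfg[i]);
  return 0;
}

With these assumptions the program prints 0x03 for entry 0, 0x00 for entry 1 (the lone W bit is dropped), and zeros for entries 2 through 7, including entry 5, whose write is ignored because it lies beyond N_PMP.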