author     Andrew Waterman <andrew@sifive.com>   2024-07-19 23:45:57 -0700
committer  GitHub <noreply@github.com>           2024-07-19 23:45:57 -0700
commit     83a2035e4065e20999295100846e2eb4ee28d79e (patch)
tree       d4acfcdb99b8d19630d9be9075f97c718bb3ae93
parent     344a860ba01f358dae7f862ef292c89d969770e4 (diff)
parent     7f8c6638868d316096e63a3aea8e3277c615971b (diff)
Merge pull request #1718 from YenHaoChen/pr-pm
Implement pointer masking
-rw-r--r--  disasm/isa_parser.cc   |   6
-rw-r--r--  riscv/csrs.cc          |  37
-rw-r--r--  riscv/csrs.h           |   7
-rw-r--r--  riscv/decode_macros.h  |   3
-rw-r--r--  riscv/isa_parser.h     |   3
-rw-r--r--  riscv/mmu.cc           | 105
-rw-r--r--  riscv/mmu.h            |  47
-rw-r--r--  riscv/processor.cc     |   9
8 files changed, 156 insertions(+), 61 deletions(-)
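
Note: pointer masking makes a configurable number of upper address bits (PMLEN) ignored before an access is translated or performed; the masked bits are replaced by sign extension for virtual addresses and by zero extension for bare/physical accesses, which is what mmu_t::generate_access_info below does with the new sext/zext helpers. A minimal standalone sketch of that transform (mask_pointer and the example addresses are illustrative, not code from this commit):

    #include <cassert>
    #include <cstdint>

    using reg_t  = uint64_t;
    using sreg_t = int64_t;

    // Drop the upper `pmlen` bits of an effective address: zero-extend for
    // bare/physical accesses, sign-extend for virtual ones.
    static reg_t mask_pointer(reg_t addr, unsigned pmlen, bool is_physical) {
      if (pmlen == 0)
        return addr;
      return is_physical ? (addr << pmlen) >> pmlen
                         : (reg_t)((sreg_t)(addr << pmlen) >> pmlen);
    }

    int main() {
      // PMM=3 selects PMLEN=16, so bits 63..48 are ignored.
      assert(mask_pointer(0xABCD800000001234ULL, 16, false) == 0xFFFF800000001234ULL);
      assert(mask_pointer(0xABCD000000001234ULL, 16, true)  == 0x0000000000001234ULL);
      return 0;
    }

With PMM=3 only the low 48 address bits remain significant; with PMM=2 the low 57 bits do.
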
diff --git a/disasm/isa_parser.cc b/disasm/isa_parser.cc
index 140bc87..79203df 100644
--- a/disasm/isa_parser.cc
+++ b/disasm/isa_parser.cc
@@ -318,6 +318,12 @@ isa_parser_t::isa_parser_t(const char* str, const char *priv)
extension_table[EXT_ZICFILP] = true;
} else if (ext_str == "zicfiss") {
extension_table[EXT_ZICFISS] = true;
+ } else if (ext_str == "smmpm") {
+ extension_table[EXT_SMMPM] = true;
+ } else if (ext_str == "smnpm") {
+ extension_table[EXT_SMNPM] = true;
+ } else if (ext_str == "ssnpm") {
+ extension_table[EXT_SSNPM] = true;
} else if (ext_str.substr(0, 3) == "zvl") {
reg_t new_vlen;
try {
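
The parser hunk above means the three new extensions are requested through the usual lowercase, underscore-separated ISA string. A hedged usage sketch (the two-argument constructor is the one shown in the hunk; extension_enabled() is assumed here to be the existing accessor over extension_table):

    #include "isa_parser.h"  // riscv/isa_parser.h in this tree

    static bool pointer_masking_requested() {
      isa_parser_t isa("rv64imafdc_smmpm_smnpm_ssnpm", "MSU");
      // extension_enabled() is assumed; the parser records the flags in extension_table.
      return isa.extension_enabled(EXT_SMMPM)
          && isa.extension_enabled(EXT_SMNPM)
          && isa.extension_enabled(EXT_SSNPM);
    }
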
diff --git a/riscv/csrs.cc b/riscv/csrs.cc
index a3f088c..a74cf15 100644
--- a/riscv/csrs.cc
+++ b/riscv/csrs.cc
@@ -286,6 +286,7 @@ mseccfg_csr_t::mseccfg_csr_t(processor_t* const proc, const reg_t addr):
void mseccfg_csr_t::verify_permissions(insn_t insn, bool write) const {
basic_csr_t::verify_permissions(insn, write);
if (!proc->extension_enabled(EXT_SMEPMP) &&
+ !proc->extension_enabled(EXT_SMMPM) &&
!proc->extension_enabled(EXT_ZICFILP) &&
!proc->extension_enabled(EXT_ZKR))
throw trap_illegal_instruction(insn.bits());
@@ -342,6 +343,12 @@ bool mseccfg_csr_t::unlogged_write(const reg_t val) noexcept {
new_val |= (val & MSECCFG_MLPE);
}
+ if (proc->extension_enabled(EXT_SMMPM)) {
+ const reg_t pmm_reserved = 1; // Reserved value of mseccfg.PMM
+ reg_t pmm = get_field(val, MSECCFG_PMM);
+ new_val = set_field(new_val, MSECCFG_PMM, pmm != pmm_reserved ? pmm : 0);
+ }
+
return basic_csr_t::unlogged_write(new_val);
}
@@ -968,7 +975,15 @@ envcfg_csr_t::envcfg_csr_t(processor_t* const proc, const reg_t addr, const reg_
bool envcfg_csr_t::unlogged_write(const reg_t val) noexcept {
const reg_t cbie_reserved = 2; // Reserved value of xenvcfg.CBIE
- const reg_t adjusted_val = get_field(val, MENVCFG_CBIE) != cbie_reserved ? val : set_field(val, MENVCFG_CBIE, 0);
+ reg_t adjusted_val = get_field(val, MENVCFG_CBIE) != cbie_reserved ? val : set_field(val, MENVCFG_CBIE, 0);
+
+ const reg_t pmm_reserved = 1; // Reserved value of xenvcfg.PMM
+ const reg_t pmm = get_field(adjusted_val, MENVCFG_PMM);
+ adjusted_val = set_field(adjusted_val, MENVCFG_PMM, pmm != pmm_reserved ? pmm : 0);
+
+ if (get_field(adjusted_val, MENVCFG_PMM) != get_field(read(), MENVCFG_PMM))
+ proc->get_mmu()->flush_tlb();
+
return masked_csr_t::unlogged_write(adjusted_val);
}
@@ -1836,3 +1851,23 @@ void mtval2_csr_t::verify_permissions(insn_t insn, bool write) const {
if (!proc->extension_enabled('H') && !proc->extension_enabled(EXT_SSDBLTRP))
throw trap_illegal_instruction(insn.bits());
}
+
+hstatus_csr_t::hstatus_csr_t(processor_t* const proc, const reg_t addr):
+ basic_csr_t(proc, addr, set_field((reg_t)0, HSTATUS_VSXL, xlen_to_uxl(proc->get_const_xlen()))) {
+}
+
+bool hstatus_csr_t::unlogged_write(const reg_t val) noexcept {
+ const reg_t mask = HSTATUS_VTSR | HSTATUS_VTW
+ | (proc->supports_impl(IMPL_MMU) ? HSTATUS_VTVM : 0)
+ | (proc->extension_enabled(EXT_SSNPM) ? HSTATUS_HUPMM : 0)
+ | HSTATUS_HU | HSTATUS_SPVP | HSTATUS_SPV | HSTATUS_GVA;
+
+ const reg_t pmm_reserved = 1; // Reserved value of hstatus.HUPMM
+ reg_t pmm = get_field(val, HSTATUS_HUPMM);
+ const reg_t adjusted_val = set_field(val, HSTATUS_HUPMM, pmm != pmm_reserved ? pmm : 0);
+
+ const reg_t new_hstatus = (read() & ~mask) | (adjusted_val & mask);
+ if (get_field(new_hstatus, HSTATUS_HUPMM) != get_field(read(), HSTATUS_HUPMM))
+ proc->get_mmu()->flush_tlb();
+ return basic_csr_t::unlogged_write(new_hstatus);
+}
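
All of the writable PMM fields touched above (mseccfg.PMM, the xenvcfg PMM fields, hstatus.HUPMM) are legalized the same way as xenvcfg.CBIE: the reserved encoding 1 is written back as 0. A minimal standalone sketch of that WARL step, using the same get_field/set_field arithmetic used throughout the tree (the PMM_MASK bit position here is a placeholder, not the real encoding):

    #include <cassert>
    #include <cstdint>

    using reg_t = uint64_t;

    // Placeholder 2-bit field; the real PMM masks come from the encoding headers.
    static constexpr reg_t PMM_MASK = reg_t(3) << 32;

    static reg_t get_field(reg_t reg, reg_t mask) { return (reg & mask) / (mask & ~(mask << 1)); }
    static reg_t set_field(reg_t reg, reg_t mask, reg_t val) {
      return (reg & ~mask) | ((val * (mask & ~(mask << 1))) & mask);
    }

    // WARL legalization: the reserved PMM encoding 1 is written back as 0.
    static reg_t legalize_pmm(reg_t val) {
      const reg_t pmm_reserved = 1;
      const reg_t pmm = get_field(val, PMM_MASK);
      return set_field(val, PMM_MASK, pmm != pmm_reserved ? pmm : 0);
    }

    int main() {
      assert(get_field(legalize_pmm(set_field(0, PMM_MASK, 1)), PMM_MASK) == 0); // reserved -> 0
      assert(get_field(legalize_pmm(set_field(0, PMM_MASK, 3)), PMM_MASK) == 3); // legal value kept
      return 0;
    }

Because a reserved write is silently squashed rather than trapped, software can discover the supported encodings by writing a value and reading it back, which is the usual WARL convention.
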
diff --git a/riscv/csrs.h b/riscv/csrs.h
index dc89963..db61fba 100644
--- a/riscv/csrs.h
+++ b/riscv/csrs.h
@@ -891,4 +891,11 @@ class mtval2_csr_t: public hypervisor_csr_t {
mtval2_csr_t(processor_t* const proc, const reg_t addr);
virtual void verify_permissions(insn_t insn, bool write) const override;
};
+
+class hstatus_csr_t final: public basic_csr_t {
+ public:
+ hstatus_csr_t(processor_t* const proc, const reg_t addr);
+ protected:
+ virtual bool unlogged_write(const reg_t val) noexcept override;
+};
#endif
diff --git a/riscv/decode_macros.h b/riscv/decode_macros.h
index 675634a..7fdec4e 100644
--- a/riscv/decode_macros.h
+++ b/riscv/decode_macros.h
@@ -218,8 +218,9 @@ static inline bool is_aligned(const unsigned val, const unsigned pos)
#define sext32(x) ((sreg_t)(int32_t)(x))
#define zext32(x) ((reg_t)(uint32_t)(x))
-#define sext_xlen(x) (((sreg_t)(x) << (64 - xlen)) >> (64 - xlen))
+#define sext(x, pos) (((sreg_t)(x) << (64 - (pos))) >> (64 - (pos)))
#define zext(x, pos) (((reg_t)(x) << (64 - (pos))) >> (64 - (pos)))
+#define sext_xlen(x) sext(x, xlen)
#define zext_xlen(x) zext(x, xlen)
#define set_pc(x) \
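
The new sext(x, pos) macro above generalizes sext_xlen by treating bit pos-1 as the sign bit: the value is shifted so that bit lands at bit 63 and then arithmetically shifted back down. A standalone illustration (the two macros are reproduced so the asserts are self-contained):

    #include <cassert>
    #include <cstdint>

    using reg_t  = uint64_t;
    using sreg_t = int64_t;

    #define sext(x, pos) (((sreg_t)(x) << (64 - (pos))) >> (64 - (pos)))
    #define zext(x, pos) (((reg_t)(x) << (64 - (pos))) >> (64 - (pos)))

    int main() {
      assert(sext(0x80u, 8) == -128);                        // bit 7 set -> negative
      assert(sext(0x7Fu, 8) == 127);                         // bit 7 clear -> value unchanged
      assert(zext(0xFFFFFFFF80000000ULL, 32) == 0x80000000); // upper 32 bits dropped
      return 0;
    }
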
diff --git a/riscv/isa_parser.h b/riscv/isa_parser.h
index 783af80..45f637c 100644
--- a/riscv/isa_parser.h
+++ b/riscv/isa_parser.h
@@ -82,6 +82,9 @@ typedef enum {
EXT_ZICFILP,
EXT_ZICFISS,
EXT_SSDBLTRP,
+ EXT_SMMPM,
+ EXT_SMNPM,
+ EXT_SSNPM,
NUM_ISA_EXTENSIONS
} isa_extension_t;
diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index 37bdb90..94997a2 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -5,6 +5,7 @@
#include "arith.h"
#include "simif.h"
#include "processor.h"
+#include "decode_macros.h"
mmu_t::mmu_t(simif_t* sim, endianness_t endianness, processor_t* proc)
: sim(sim), proc(proc),
@@ -54,7 +55,7 @@ void throw_access_exception(bool virt, reg_t addr, access_type type)
reg_t mmu_t::translate(mem_access_info_t access_info, reg_t len)
{
- reg_t addr = access_info.vaddr;
+ reg_t addr = access_info.transformed_vaddr;
access_type type = access_info.type;
if (!proc)
return addr;
@@ -192,9 +193,10 @@ void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool
void mmu_t::load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_t access_info)
{
reg_t addr = access_info.vaddr;
- reg_t vpn = addr >> PGSHIFT;
+ reg_t transformed_addr = access_info.transformed_vaddr;
+ reg_t vpn = transformed_addr >> PGSHIFT;
if (!access_info.flags.is_special_access() && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
- auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr;
+ auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + transformed_addr;
memcpy(bytes, host_addr, len);
return;
}
@@ -202,7 +204,7 @@ void mmu_t::load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_
reg_t paddr = translate(access_info, len);
if (access_info.flags.lr && !sim->reservable(paddr)) {
- throw trap_load_access_fault(access_info.effective_virt, addr, 0, 0);
+ throw trap_load_access_fault(access_info.effective_virt, transformed_addr, 0, 0);
}
if (auto host_addr = sim->addr_to_mem(paddr)) {
@@ -213,7 +215,7 @@ void mmu_t::load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_
refill_tlb(addr, paddr, host_addr, LOAD);
} else if (!mmio_load(paddr, len, bytes)) {
- throw trap_load_access_fault(access_info.effective_virt, addr, 0, 0);
+ throw trap_load_access_fault(access_info.effective_virt, transformed_addr, 0, 0);
}
if (access_info.flags.lr) {
@@ -221,42 +223,44 @@ void mmu_t::load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_
}
}
-void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags)
+void mmu_t::load_slow_path(reg_t original_addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags)
{
- auto access_info = generate_access_info(addr, LOAD, xlate_flags);
- check_triggers(triggers::OPERATION_LOAD, addr, access_info.effective_virt);
+ auto access_info = generate_access_info(original_addr, LOAD, xlate_flags);
+ reg_t transformed_addr = access_info.transformed_vaddr;
+ check_triggers(triggers::OPERATION_LOAD, transformed_addr, access_info.effective_virt);
- if ((addr & (len - 1)) == 0) {
+ if ((transformed_addr & (len - 1)) == 0) {
load_slow_path_intrapage(len, bytes, access_info);
} else {
bool gva = access_info.effective_virt;
if (!is_misaligned_enabled())
- throw trap_load_address_misaligned(gva, addr, 0, 0);
+ throw trap_load_address_misaligned(gva, transformed_addr, 0, 0);
if (access_info.flags.lr)
- throw trap_load_access_fault(gva, addr, 0, 0);
+ throw trap_load_access_fault(gva, transformed_addr, 0, 0);
- reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);
+ reg_t len_page0 = std::min(len, PGSIZE - transformed_addr % PGSIZE);
load_slow_path_intrapage(len_page0, bytes, access_info);
if (len_page0 != len)
load_slow_path_intrapage(len - len_page0, bytes + len_page0, access_info.split_misaligned_access(len_page0));
}
while (len > sizeof(reg_t)) {
- check_triggers(triggers::OPERATION_LOAD, addr, access_info.effective_virt, reg_from_bytes(sizeof(reg_t), bytes));
+ check_triggers(triggers::OPERATION_LOAD, transformed_addr, access_info.effective_virt, reg_from_bytes(sizeof(reg_t), bytes));
len -= sizeof(reg_t);
bytes += sizeof(reg_t);
}
- check_triggers(triggers::OPERATION_LOAD, addr, access_info.effective_virt, reg_from_bytes(len, bytes));
+ check_triggers(triggers::OPERATION_LOAD, transformed_addr, access_info.effective_virt, reg_from_bytes(len, bytes));
}
void mmu_t::store_slow_path_intrapage(reg_t len, const uint8_t* bytes, mem_access_info_t access_info, bool actually_store)
{
reg_t addr = access_info.vaddr;
- reg_t vpn = addr >> PGSHIFT;
+ reg_t transformed_addr = access_info.transformed_vaddr;
+ reg_t vpn = transformed_addr >> PGSHIFT;
if (!access_info.flags.is_special_access() && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
if (actually_store) {
- auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr;
+ auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + transformed_addr;
memcpy(host_addr, bytes, len);
}
return;
@@ -272,34 +276,35 @@ void mmu_t::store_slow_path_intrapage(reg_t len, const uint8_t* bytes, mem_acces
else if (!access_info.flags.is_special_access())
refill_tlb(addr, paddr, host_addr, STORE);
} else if (!mmio_store(paddr, len, bytes)) {
- throw trap_store_access_fault(access_info.effective_virt, addr, 0, 0);
+ throw trap_store_access_fault(access_info.effective_virt, transformed_addr, 0, 0);
}
}
}
-void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool UNUSED require_alignment)
+void mmu_t::store_slow_path(reg_t original_addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool UNUSED require_alignment)
{
- auto access_info = generate_access_info(addr, STORE, xlate_flags);
+ auto access_info = generate_access_info(original_addr, STORE, xlate_flags);
+ reg_t transformed_addr = access_info.transformed_vaddr;
if (actually_store) {
reg_t trig_len = len;
const uint8_t* trig_bytes = bytes;
while (trig_len > sizeof(reg_t)) {
- check_triggers(triggers::OPERATION_STORE, addr, access_info.effective_virt, reg_from_bytes(sizeof(reg_t), trig_bytes));
+ check_triggers(triggers::OPERATION_STORE, transformed_addr, access_info.effective_virt, reg_from_bytes(sizeof(reg_t), trig_bytes));
trig_len -= sizeof(reg_t);
trig_bytes += sizeof(reg_t);
}
- check_triggers(triggers::OPERATION_STORE, addr, access_info.effective_virt, reg_from_bytes(trig_len, trig_bytes));
+ check_triggers(triggers::OPERATION_STORE, transformed_addr, access_info.effective_virt, reg_from_bytes(trig_len, trig_bytes));
}
- if (addr & (len - 1)) {
+ if (transformed_addr & (len - 1)) {
bool gva = access_info.effective_virt;
if (!is_misaligned_enabled())
- throw trap_store_address_misaligned(gva, addr, 0, 0);
+ throw trap_store_address_misaligned(gva, transformed_addr, 0, 0);
if (require_alignment)
- throw trap_store_access_fault(gva, addr, 0, 0);
+ throw trap_store_access_fault(gva, transformed_addr, 0, 0);
- reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);
+ reg_t len_page0 = std::min(len, PGSIZE - transformed_addr % PGSIZE);
store_slow_path_intrapage(len_page0, bytes, access_info, actually_store);
if (len_page0 != len)
store_slow_path_intrapage(len - len_page0, bytes + len_page0, access_info.split_misaligned_access(len_page0), actually_store);
@@ -484,7 +489,7 @@ reg_t mmu_t::s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_ty
reg_t mmu_t::walk(mem_access_info_t access_info)
{
access_type type = access_info.type;
- reg_t addr = access_info.vaddr;
+ reg_t addr = access_info.transformed_vaddr;
bool virt = access_info.effective_virt;
bool hlvx = access_info.flags.hlvx;
reg_t mode = access_info.effective_priv;
@@ -607,3 +612,51 @@ void mmu_t::register_memtracer(memtracer_t* t)
flush_tlb();
tracer.hook(t);
}
+
+reg_t mmu_t::get_pmlen(bool effective_virt, reg_t effective_priv, xlate_flags_t flags) const {
+ if (!proc || proc->get_xlen() != 64 || (in_mprv() && (proc->state.sstatus->read() & MSTATUS_MXR)) || flags.hlvx)
+ return 0;
+
+ reg_t pmm = 0;
+ if (effective_priv == PRV_M)
+ pmm = get_field(proc->state.mseccfg->read(), MSECCFG_PMM);
+ else if (!effective_virt && (effective_priv == PRV_S || (!proc->extension_enabled('S') && effective_priv == PRV_U)))
+ pmm = get_field(proc->state.menvcfg->read(), MENVCFG_PMM);
+ else if (effective_virt && effective_priv == PRV_S)
+ pmm = get_field(proc->state.henvcfg->read(), HENVCFG_PMM);
+ else if (proc->state.prv == PRV_U && flags.forced_virt)
+ pmm = get_field(proc->state.hstatus->read(), HSTATUS_HUPMM);
+ else if (effective_priv == PRV_U)
+ pmm = get_field(proc->state.senvcfg->read(), SENVCFG_PMM);
+ else
+ assert(false);
+
+ switch (pmm) {
+ case 2: return 7;
+ case 3: return 16;
+ }
+ return 0;
+}
+
+mem_access_info_t mmu_t::generate_access_info(reg_t addr, access_type type, xlate_flags_t xlate_flags) {
+ if (!proc)
+ return {addr, addr, 0, false, {}, type};
+ bool virt = proc->state.v;
+ reg_t mode = proc->state.prv;
+ if (type != FETCH) {
+ if (in_mprv()) {
+ mode = get_field(proc->state.mstatus->read(), MSTATUS_MPP);
+ if (get_field(proc->state.mstatus->read(), MSTATUS_MPV) && mode != PRV_M)
+ virt = true;
+ }
+ if (xlate_flags.forced_virt) {
+ virt = true;
+ mode = get_field(proc->state.hstatus->read(), HSTATUS_SPVP);
+ }
+ }
+ reg_t pmlen = get_pmlen(virt, mode, xlate_flags);
+ reg_t satp = proc->state.satp->readvirt(virt);
+ bool is_physical_addr = mode == PRV_M || get_field(satp, SATP64_MODE) == SATP_MODE_OFF;
+ reg_t transformed_addr = is_physical_addr ? zext(addr, 64 - pmlen) : sext(addr, 64 - pmlen);
+ return {addr, transformed_addr, mode, virt, xlate_flags, type};
+}
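
get_pmlen() above selects the controlling PMM field by effective privilege (mseccfg for M-mode, menvcfg for HS/S-mode and for U-mode on harts without S, henvcfg for VS-mode, hstatus.HUPMM for forced-virtual HLV/HSV accesses issued from U-mode, senvcfg otherwise for U-mode) and converts the 2-bit encoding to a masked-bit count. A standalone restatement of just that mapping and the resulting significant address widths (pmlen_of_pmm is illustrative, not code from the commit):

    #include <cassert>
    #include <cstdint>

    using reg_t = uint64_t;

    // PMM encoding -> number of masked upper bits (PMLEN), as in get_pmlen() above.
    static reg_t pmlen_of_pmm(reg_t pmm) {
      switch (pmm) {
        case 2: return 7;    // 57 significant address bits
        case 3: return 16;   // 48 significant address bits
        default: return 0;   // PMM=0 (disabled); the reserved encoding 1 is squashed to 0 on write
      }
    }

    int main() {
      assert(64 - pmlen_of_pmm(2) == 57);
      assert(64 - pmlen_of_pmm(3) == 48);
      assert(pmlen_of_pmm(0) == 0 && pmlen_of_pmm(1) == 0);
      return 0;
    }

generate_access_info() then applies the mask with zext when the access is effectively physical (M-mode or translation off) and sext otherwise, and carries the result as transformed_vaddr.
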
diff --git a/riscv/mmu.h b/riscv/mmu.h
index 3e4ae9a..0aa1f96 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -51,13 +51,14 @@ struct xlate_flags_t {
struct mem_access_info_t {
const reg_t vaddr;
+ const reg_t transformed_vaddr;
const reg_t effective_priv;
const bool effective_virt;
const xlate_flags_t flags;
const access_type type;
mem_access_info_t split_misaligned_access(reg_t offset) const {
- return {vaddr + offset, effective_priv, effective_virt, flags, type};
+ return {vaddr + offset, transformed_vaddr + offset, effective_priv, effective_virt, flags, type};
}
};
@@ -71,24 +72,8 @@ private:
std::map<reg_t, reg_t> alloc_cache;
std::vector<std::pair<reg_t, reg_t >> addr_tbl;
- mem_access_info_t generate_access_info(reg_t addr, access_type type, xlate_flags_t xlate_flags) {
- if (!proc)
- return {addr, 0, false, {}, type};
- bool virt = proc->state.v;
- reg_t mode = proc->state.prv;
- if (type != FETCH) {
- if (in_mprv()) {
- mode = get_field(proc->state.mstatus->read(), MSTATUS_MPP);
- if (get_field(proc->state.mstatus->read(), MSTATUS_MPV) && mode != PRV_M)
- virt = true;
- }
- if (xlate_flags.forced_virt) {
- virt = true;
- mode = get_field(proc->state.hstatus->read(), HSTATUS_SPVP);
- }
- }
- return {addr, mode, virt, xlate_flags, type};
- }
+ reg_t get_pmlen(bool effective_virt, reg_t effective_priv, xlate_flags_t flags) const;
+ mem_access_info_t generate_access_info(reg_t addr, access_type type, xlate_flags_t xlate_flags);
public:
mmu_t(simif_t* sim, endianness_t endianness, processor_t* proc);
@@ -236,24 +221,30 @@ public:
}
void cbo_zero(reg_t addr) {
- auto base = addr & ~(blocksz - 1);
+ auto access_info = generate_access_info(addr, STORE, {});
+ reg_t transformed_addr = access_info.transformed_vaddr;
+
+ auto base = transformed_addr & ~(blocksz - 1);
for (size_t offset = 0; offset < blocksz; offset += 1) {
- check_triggers(triggers::OPERATION_STORE, base + offset, false, addr, std::nullopt);
+ check_triggers(triggers::OPERATION_STORE, base + offset, false, transformed_addr, std::nullopt);
store<uint8_t>(base + offset, 0);
}
}
void clean_inval(reg_t addr, bool clean, bool inval) {
- auto base = addr & ~(blocksz - 1);
+ auto access_info = generate_access_info(addr, LOAD, {});
+ reg_t transformed_addr = access_info.transformed_vaddr;
+
+ auto base = transformed_addr & ~(blocksz - 1);
for (size_t offset = 0; offset < blocksz; offset += 1)
- check_triggers(triggers::OPERATION_STORE, base + offset, false, addr, std::nullopt);
+ check_triggers(triggers::OPERATION_STORE, base + offset, false, transformed_addr, std::nullopt);
convert_load_traps_to_store_traps({
- const reg_t paddr = translate(generate_access_info(addr, LOAD, {}), 1);
+ const reg_t paddr = translate(generate_access_info(transformed_addr, LOAD, {}), 1);
if (sim->reservable(paddr)) {
if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
tracer.clean_invalidate(paddr, blocksz, clean, inval);
} else {
- throw trap_store_access_fault((proc) ? proc->state.v : false, addr, 0, 0);
+ throw trap_store_access_fault((proc) ? proc->state.v : false, transformed_addr, 0, 0);
}
})
}
@@ -416,9 +407,9 @@ private:
// handle uncommon cases: TLB misses, page faults, MMIO
tlb_entry_t fetch_slow_path(reg_t addr);
- void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags);
+ void load_slow_path(reg_t original_addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags);
void load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_t access_info);
- void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool require_alignment);
+ void store_slow_path(reg_t original_addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool require_alignment);
void store_slow_path_intrapage(reg_t len, const uint8_t* bytes, mem_access_info_t access_info, bool actually_store);
bool mmio_fetch(reg_t paddr, size_t len, uint8_t* bytes);
bool mmio_load(reg_t paddr, size_t len, uint8_t* bytes);
@@ -490,7 +481,7 @@ private:
return (uint16_t*)(translate_insn_addr(addr).host_offset + addr);
}
- inline bool in_mprv()
+ inline bool in_mprv() const
{
return proc != nullptr
&& !(proc->state.mnstatus && !get_field(proc->state.mnstatus->read(), MNSTATUS_NMIE))
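
mem_access_info_t now carries both the program-visible vaddr and the pointer-masked transformed_vaddr, and split_misaligned_access() advances both by the same offset so the two halves of a misaligned access stay consistent. A reduced standalone sketch of that invariant (only the two address members are kept; the struct name and example values are illustrative):

    #include <cassert>
    #include <cstdint>

    using reg_t = uint64_t;

    // Reduced stand-in for mem_access_info_t: only the two address members.
    struct access_sketch_t {
      const reg_t vaddr;
      const reg_t transformed_vaddr;
      access_sketch_t split_misaligned_access(reg_t offset) const {
        return {vaddr + offset, transformed_vaddr + offset};
      }
    };

    int main() {
      // 8-byte access crossing a page boundary, with the upper 16 address bits masked off.
      access_sketch_t first{0xABCD000000000FFCULL, 0x0000000000000FFCULL};
      access_sketch_t second = first.split_misaligned_access(4);
      assert(second.vaddr - first.vaddr == second.transformed_vaddr - first.transformed_vaddr);
      assert(second.transformed_vaddr == 0x1000); // second half starts on the next page
      return 0;
    }
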
diff --git a/riscv/processor.cc b/riscv/processor.cc
index 7299548..418599b 100644
--- a/riscv/processor.cc
+++ b/riscv/processor.cc
@@ -297,11 +297,7 @@ void state_t::reset(processor_t* const proc, reg_t max_isa)
csrmap[CSR_SCAUSE] = scause = std::make_shared<virtualized_csr_t>(proc, nonvirtual_scause, vscause);
csrmap[CSR_MTVAL2] = mtval2 = std::make_shared<mtval2_csr_t>(proc, CSR_MTVAL2);
csrmap[CSR_MTINST] = mtinst = std::make_shared<hypervisor_csr_t>(proc, CSR_MTINST);
- const reg_t hstatus_init = set_field((reg_t)0, HSTATUS_VSXL, xlen_to_uxl(proc->get_const_xlen()));
- const reg_t hstatus_mask = HSTATUS_VTSR | HSTATUS_VTW
- | (proc->supports_impl(IMPL_MMU) ? HSTATUS_VTVM : 0)
- | HSTATUS_HU | HSTATUS_SPVP | HSTATUS_SPV | HSTATUS_GVA;
- csrmap[CSR_HSTATUS] = hstatus = std::make_shared<masked_csr_t>(proc, CSR_HSTATUS, hstatus_mask, hstatus_init);
+ csrmap[CSR_HSTATUS] = hstatus = std::make_shared<hstatus_csr_t>(proc, CSR_HSTATUS);
csrmap[CSR_HGEIE] = std::make_shared<const_csr_t>(proc, CSR_HGEIE, 0);
csrmap[CSR_HGEIP] = std::make_shared<const_csr_t>(proc, CSR_HGEIP, 0);
csrmap[CSR_HIDELEG] = hideleg = std::make_shared<hideleg_csr_t>(proc, CSR_HIDELEG, mideleg);
@@ -388,6 +384,7 @@ void state_t::reset(processor_t* const proc, reg_t max_isa)
if (proc->extension_enabled_const('U')) {
const reg_t menvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? MENVCFG_CBCFE | MENVCFG_CBIE : 0) |
(proc->extension_enabled(EXT_ZICBOZ) ? MENVCFG_CBZE : 0) |
+ (proc->extension_enabled(EXT_SMNPM) ? MENVCFG_PMM : 0) |
(proc->extension_enabled(EXT_SVADU) ? MENVCFG_ADUE: 0) |
(proc->extension_enabled(EXT_SVPBMT) ? MENVCFG_PBMTE : 0) |
(proc->extension_enabled(EXT_SSTC) ? MENVCFG_STCE : 0) |
@@ -404,11 +401,13 @@ void state_t::reset(processor_t* const proc, reg_t max_isa)
}
const reg_t senvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? SENVCFG_CBCFE | SENVCFG_CBIE : 0) |
(proc->extension_enabled(EXT_ZICBOZ) ? SENVCFG_CBZE : 0) |
+ (proc->extension_enabled(EXT_SSNPM) ? SENVCFG_PMM : 0) |
(proc->extension_enabled(EXT_ZICFILP) ? SENVCFG_LPE : 0) |
(proc->extension_enabled(EXT_ZICFISS) ? SENVCFG_SSE : 0);
csrmap[CSR_SENVCFG] = senvcfg = std::make_shared<senvcfg_csr_t>(proc, CSR_SENVCFG, senvcfg_mask, 0);
const reg_t henvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? HENVCFG_CBCFE | HENVCFG_CBIE : 0) |
(proc->extension_enabled(EXT_ZICBOZ) ? HENVCFG_CBZE : 0) |
+ (proc->extension_enabled(EXT_SSNPM) ? HENVCFG_PMM : 0) |
(proc->extension_enabled(EXT_SVADU) ? HENVCFG_ADUE: 0) |
(proc->extension_enabled(EXT_SVPBMT) ? HENVCFG_PBMTE : 0) |
(proc->extension_enabled(EXT_SSTC) ? HENVCFG_STCE : 0) |