From 3286d262eb42f414e7e43c734532f309dee9fe81 Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Mon, 17 Apr 2023 18:53:27 -0700 Subject: Rename RISCV_XLATE_VIRT to RISCV_XLATE_FORCED_VIRT More readable/understandable. --- riscv/mmu.cc | 6 +++--- riscv/mmu.h | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index be24f40..f6f0a0d 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -66,7 +66,7 @@ reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_f if (get_field(proc->state.mstatus->read(), MSTATUS_MPV) && mode != PRV_M) virt = true; } - if (xlate_flags & RISCV_XLATE_VIRT) { + if (xlate_flags & RISCV_XLATE_FORCED_VIRT) { virt = true; mode = get_field(proc->state.hstatus->read(), HSTATUS_SPVP); } @@ -236,7 +236,7 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate if ((addr & (len - 1)) == 0) { load_slow_path_intrapage(addr, len, bytes, xlate_flags); } else { - bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); + bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_FORCED_VIRT & xlate_flags); if (!is_misaligned_enabled()) throw trap_load_address_misaligned(gva, addr, 0, 0); @@ -284,7 +284,7 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_ check_triggers(triggers::OPERATION_STORE, addr, reg_from_bytes(len, bytes)); if (addr & (len - 1)) { - bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); + bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_FORCED_VIRT & xlate_flags); if (!is_misaligned_enabled()) throw trap_store_address_misaligned(gva, addr, 0, 0); diff --git a/riscv/mmu.h b/riscv/mmu.h index ef054cf..d63a43f 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -51,7 +51,7 @@ public: mmu_t(simif_t* sim, endianness_t endianness, processor_t* proc); ~mmu_t(); -#define RISCV_XLATE_VIRT (1U << 0) +#define RISCV_XLATE_FORCED_VIRT (1U << 0) #define RISCV_XLATE_VIRT_HLVX (1U << 1) #define RISCV_XLATE_LR (1U << 2) @@ -81,12 +81,12 @@ public: template T guest_load(reg_t addr) { - return load(addr, RISCV_XLATE_VIRT); + return load(addr, RISCV_XLATE_FORCED_VIRT); } template T guest_load_x(reg_t addr) { - return load(addr, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX); + return load(addr, RISCV_XLATE_FORCED_VIRT|RISCV_XLATE_VIRT_HLVX); } template @@ -108,7 +108,7 @@ public: template void guest_store(reg_t addr, T val) { - store(addr, val, RISCV_XLATE_VIRT); + store(addr, val, RISCV_XLATE_FORCED_VIRT); } // AMO/Zicbom faults should be reported as store faults -- cgit v1.1 From d091f84af4ddc1e3c64c78d9cbac0277efd32554 Mon Sep 17 00:00:00 2001 From: rbuchner Date: Fri, 21 Apr 2023 12:22:43 -0700 Subject: Add xlate_flags_t struct Use xlate_flags_t rather than XLATE_FLAGS preprocessing directives --- riscv/mmu.cc | 34 +++++++++++++++++----------------- riscv/mmu.h | 56 +++++++++++++++++++++++++++++++++++--------------------- 2 files changed, 52 insertions(+), 38 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index f6f0a0d..4c42610 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -52,13 +52,13 @@ void throw_access_exception(bool virt, reg_t addr, access_type type) } } -reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags) +reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, xlate_flags_t xlate_flags) { if (!proc) return addr; bool virt = proc->state.v; - bool hlvx = xlate_flags & RISCV_XLATE_VIRT_HLVX; + bool hlvx = xlate_flags.hlvx; reg_t mode = 
proc->state.prv; if (type != FETCH) { if (in_mprv()) { @@ -66,7 +66,7 @@ reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_f if (get_field(proc->state.mstatus->read(), MSTATUS_MPV) && mode != PRV_M) virt = true; } - if (xlate_flags & RISCV_XLATE_FORCED_VIRT) { + if (xlate_flags.forced_virt) { virt = true; mode = get_field(proc->state.hstatus->read(), HSTATUS_SPVP); } @@ -85,7 +85,7 @@ tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) tlb_entry_t result; reg_t vpn = vaddr >> PGSHIFT; if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) { - reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, 0); + reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, {false, false, false}); if (auto host_addr = sim->addr_to_mem(paddr)) { result = refill_tlb(vaddr, paddr, host_addr, FETCH); } else { @@ -198,10 +198,10 @@ void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, std:: } } -void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) +void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags) { reg_t vpn = addr >> PGSHIFT; - if (xlate_flags == 0 && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { + if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr) && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr; memcpy(bytes, host_addr, len); return; @@ -209,7 +209,7 @@ void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint reg_t paddr = translate(addr, len, LOAD, xlate_flags); - if ((xlate_flags & RISCV_XLATE_LR) && !sim->reservable(paddr)) { + if (xlate_flags.lr && !sim->reservable(paddr)) { throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0); } @@ -217,30 +217,30 @@ void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint memcpy(bytes, host_addr, len); if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD)) tracer.trace(paddr, len, LOAD); - else if (xlate_flags == 0) + else if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr)) refill_tlb(addr, paddr, host_addr, LOAD); } else if (!mmio_load(paddr, len, bytes)) { throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0); } - if (xlate_flags & RISCV_XLATE_LR) { + if (xlate_flags.lr) { load_reservation_address = paddr; } } -void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) +void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags) { check_triggers(triggers::OPERATION_LOAD, addr); if ((addr & (len - 1)) == 0) { load_slow_path_intrapage(addr, len, bytes, xlate_flags); } else { - bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_FORCED_VIRT & xlate_flags); + bool gva = ((proc) ? 
proc->state.v : false) || xlate_flags.forced_virt; if (!is_misaligned_enabled()) throw trap_load_address_misaligned(gva, addr, 0, 0); - if (xlate_flags & RISCV_XLATE_LR) + if (xlate_flags.lr) throw trap_load_access_fault(gva, addr, 0, 0); reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE); @@ -252,10 +252,10 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate check_triggers(triggers::OPERATION_LOAD, addr, reg_from_bytes(len, bytes)); } -void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) +void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store) { reg_t vpn = addr >> PGSHIFT; - if (xlate_flags == 0 && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { + if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr) && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { if (actually_store) { auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr; memcpy(host_addr, bytes, len); @@ -270,7 +270,7 @@ void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* byte memcpy(host_addr, bytes, len); if (tracer.interested_in_range(paddr, paddr + PGSIZE, STORE)) tracer.trace(paddr, len, STORE); - else if (xlate_flags == 0) + else if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr)) refill_tlb(addr, paddr, host_addr, STORE); } else if (!mmio_store(paddr, len, bytes)) { throw trap_store_access_fault((proc) ? proc->state.v : false, addr, 0, 0); @@ -278,13 +278,13 @@ void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* byte } } -void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool UNUSED require_alignment) +void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool UNUSED require_alignment) { if (actually_store) check_triggers(triggers::OPERATION_STORE, addr, reg_from_bytes(len, bytes)); if (addr & (len - 1)) { - bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_FORCED_VIRT & xlate_flags); + bool gva = ((proc) ? proc->state.v : false) || xlate_flags.forced_virt; if (!is_misaligned_enabled()) throw trap_store_address_misaligned(gva, addr, 0, 0); diff --git a/riscv/mmu.h b/riscv/mmu.h index d63a43f..3c04672 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -38,6 +38,12 @@ struct tlb_entry_t { reg_t target_offset; }; +struct xlate_flags_t { + const bool forced_virt : 1; + const bool hlvx : 1; + const bool lr : 1; +}; + void throw_access_exception(bool virt, reg_t addr, access_type type); // this class implements a processor's port into the virtual memory system. 
@@ -51,18 +57,14 @@ public: mmu_t(simif_t* sim, endianness_t endianness, processor_t* proc); ~mmu_t(); -#define RISCV_XLATE_FORCED_VIRT (1U << 0) -#define RISCV_XLATE_VIRT_HLVX (1U << 1) -#define RISCV_XLATE_LR (1U << 2) - template - T ALWAYS_INLINE load(reg_t addr, uint32_t xlate_flags = 0) { + T ALWAYS_INLINE load(reg_t addr, xlate_flags_t xlate_flags = {false, false, false}) { target_endian res; reg_t vpn = addr >> PGSHIFT; bool aligned = (addr & (sizeof(T) - 1)) == 0; bool tlb_hit = tlb_load_tag[vpn % TLB_ENTRIES] == vpn; - if (likely(xlate_flags == 0 && aligned && tlb_hit)) { + if (likely(!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr) && aligned && tlb_hit)) { res = *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr); } else { load_slow_path(addr, sizeof(T), (uint8_t*)&res, xlate_flags); @@ -76,26 +78,35 @@ public: template T load_reserved(reg_t addr) { - return load(addr, RISCV_XLATE_LR); + bool forced_virt = false; + bool hlvx = false; + bool lr = true; + return load(addr, {forced_virt, hlvx, lr}); } template T guest_load(reg_t addr) { - return load(addr, RISCV_XLATE_FORCED_VIRT); + bool forced_virt = true; + bool hlvx = false; + bool lr = false; + return load(addr, {forced_virt, hlvx, lr}); } template T guest_load_x(reg_t addr) { - return load(addr, RISCV_XLATE_FORCED_VIRT|RISCV_XLATE_VIRT_HLVX); + bool forced_virt = true; + bool hlvx = true; + bool lr = false; + return load(addr, {forced_virt, hlvx, lr}); } template - void ALWAYS_INLINE store(reg_t addr, T val, uint32_t xlate_flags = 0) { + void ALWAYS_INLINE store(reg_t addr, T val, xlate_flags_t xlate_flags = {false, false, false}) { reg_t vpn = addr >> PGSHIFT; bool aligned = (addr & (sizeof(T) - 1)) == 0; bool tlb_hit = tlb_store_tag[vpn % TLB_ENTRIES] == vpn; - if (xlate_flags == 0 && likely(aligned && tlb_hit)) { + if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr) && likely(aligned && tlb_hit)) { *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); } else { target_endian target_val = to_target(val); @@ -108,7 +119,10 @@ public: template void guest_store(reg_t addr, T val) { - store(addr, val, RISCV_XLATE_FORCED_VIRT); + bool forced_virt = true; + bool hlvx = false; + bool lr = false; + store(addr, val, {forced_virt, hlvx, lr}); } // AMO/Zicbom faults should be reported as store faults @@ -130,7 +144,7 @@ public: template T amo(reg_t addr, op f) { convert_load_traps_to_store_traps({ - store_slow_path(addr, sizeof(T), nullptr, 0, false, true); + store_slow_path(addr, sizeof(T), nullptr, {false, false, false}, false, true); auto lhs = load(addr); store(addr, f(lhs)); return lhs; @@ -164,7 +178,7 @@ public: void clean_inval(reg_t addr, bool clean, bool inval) { convert_load_traps_to_store_traps({ - const reg_t paddr = translate(addr, blocksz, LOAD, 0) & ~(blocksz - 1); + const reg_t paddr = translate(addr, blocksz, LOAD, {false, false, false}) & ~(blocksz - 1); if (sim->reservable(paddr)) { if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD)) tracer.clean_invalidate(paddr, blocksz, clean, inval); @@ -183,10 +197,10 @@ public: { if (vaddr & (size-1)) { // Raise either access fault or misaligned exception - store_slow_path(vaddr, size, nullptr, 0, false, true); + store_slow_path(vaddr, size, nullptr, {false, false, false}, false, true); } - reg_t paddr = translate(vaddr, 1, STORE, 0); + reg_t paddr = translate(vaddr, 1, STORE, {false, false, false}); if (sim->reservable(paddr)) return load_reservation_address == paddr; else @@ -332,17 +346,17 
@@ private: // handle uncommon cases: TLB misses, page faults, MMIO tlb_entry_t fetch_slow_path(reg_t addr); - void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags); - void load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags); - void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool require_alignment); - void store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store); + void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags); + void load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags); + void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool require_alignment); + void store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store); bool mmio_fetch(reg_t paddr, size_t len, uint8_t* bytes); bool mmio_load(reg_t paddr, size_t len, uint8_t* bytes); bool mmio_store(reg_t paddr, size_t len, const uint8_t* bytes); bool mmio(reg_t paddr, size_t len, uint8_t* bytes, access_type type); bool mmio_ok(reg_t paddr, access_type type); void check_triggers(triggers::operation_t operation, reg_t address, std::optional data = std::nullopt); - reg_t translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags); + reg_t translate(reg_t addr, reg_t len, access_type type, xlate_flags_t xlate_flags); reg_t pte_load(reg_t pte_paddr, reg_t addr, bool virt, access_type trap_type, size_t ptesize) { if (ptesize == 4) -- cgit v1.1 From a0c5bf31ba22119bf365c5fcff262736d1b4ac49 Mon Sep 17 00:00:00 2001 From: rbuchner Date: Mon, 24 Apr 2023 09:45:26 -0700 Subject: Add is_special_access() to xlate_flags_t --- riscv/mmu.cc | 8 ++++---- riscv/mmu.h | 8 ++++++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 4c42610..6a5fdee 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -201,7 +201,7 @@ void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, std:: void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags) { reg_t vpn = addr >> PGSHIFT; - if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr) && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { + if (!xlate_flags.is_special_access() && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr; memcpy(bytes, host_addr, len); return; @@ -217,7 +217,7 @@ void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, xlat memcpy(bytes, host_addr, len); if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD)) tracer.trace(paddr, len, LOAD); - else if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr)) + else if (!xlate_flags.is_special_access()) refill_tlb(addr, paddr, host_addr, LOAD); } else if (!mmio_load(paddr, len, bytes)) { @@ -255,7 +255,7 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store) { reg_t vpn = addr >> PGSHIFT; - if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr) && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { + if (!xlate_flags.is_special_access() && vpn == 
(tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { if (actually_store) { auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr; memcpy(host_addr, bytes, len); @@ -270,7 +270,7 @@ void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* byte memcpy(host_addr, bytes, len); if (tracer.interested_in_range(paddr, paddr + PGSIZE, STORE)) tracer.trace(paddr, len, STORE); - else if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr)) + else if (!xlate_flags.is_special_access()) refill_tlb(addr, paddr, host_addr, STORE); } else if (!mmio_store(paddr, len, bytes)) { throw trap_store_access_fault((proc) ? proc->state.v : false, addr, 0, 0); diff --git a/riscv/mmu.h b/riscv/mmu.h index 3c04672..6e79539 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -42,6 +42,10 @@ struct xlate_flags_t { const bool forced_virt : 1; const bool hlvx : 1; const bool lr : 1; + + bool is_special_access() const { + return forced_virt || hlvx || lr; + } }; void throw_access_exception(bool virt, reg_t addr, access_type type); @@ -64,7 +68,7 @@ public: bool aligned = (addr & (sizeof(T) - 1)) == 0; bool tlb_hit = tlb_load_tag[vpn % TLB_ENTRIES] == vpn; - if (likely(!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr) && aligned && tlb_hit)) { + if (likely(!xlate_flags.is_special_access() && aligned && tlb_hit)) { res = *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr); } else { load_slow_path(addr, sizeof(T), (uint8_t*)&res, xlate_flags); @@ -106,7 +110,7 @@ public: bool aligned = (addr & (sizeof(T) - 1)) == 0; bool tlb_hit = tlb_store_tag[vpn % TLB_ENTRIES] == vpn; - if (!(xlate_flags.hlvx || xlate_flags.forced_virt || xlate_flags.lr) && likely(aligned && tlb_hit)) { + if (!xlate_flags.is_special_access() && likely(aligned && tlb_hit)) { *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); } else { target_endian target_val = to_target(val); -- cgit v1.1 From 8a34e1a5b3bc68b915127af15ea9254f7d812727 Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Tue, 11 Apr 2023 10:42:58 -0700 Subject: Add structure (mem_access_info_t) for holding memory access information Add complementary function for generating access information. Update mmu_t::translate() to accept a mem_access_info_t. 
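For orientation, the intended call pattern inside mmu_t after this change is roughly the following sketch (illustrative only, not code added by this patch; addr, len, and the chosen flag values are placeholders):

    // Compute the effective privilege/virtualization state once, up front,
    // then carry it through translation instead of re-deriving it from
    // proc->state at every call site.
    xlate_flags_t flags = {false, false, true};   // {forced_virt, hlvx, lr}
    mem_access_info_t access_info = generate_access_info(addr, LOAD, flags);
    reg_t paddr = translate(access_info, len);    // reads effective_virt / effective_priv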
--- riscv/mmu.cc | 28 ++++++++++------------------ riscv/mmu.h | 34 +++++++++++++++++++++++++++++++--- 2 files changed, 41 insertions(+), 21 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 6a5fdee..e2341fb 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -52,25 +52,16 @@ void throw_access_exception(bool virt, reg_t addr, access_type type) } } -reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, xlate_flags_t xlate_flags) +reg_t mmu_t::translate(mem_access_info_t access_info, reg_t len) { + reg_t addr = access_info.vaddr; + access_type type = access_info.type; if (!proc) return addr; - bool virt = proc->state.v; - bool hlvx = xlate_flags.hlvx; - reg_t mode = proc->state.prv; - if (type != FETCH) { - if (in_mprv()) { - mode = get_field(proc->state.mstatus->read(), MSTATUS_MPP); - if (get_field(proc->state.mstatus->read(), MSTATUS_MPV) && mode != PRV_M) - virt = true; - } - if (xlate_flags.forced_virt) { - virt = true; - mode = get_field(proc->state.hstatus->read(), HSTATUS_SPVP); - } - } + bool virt = access_info.effective_virt; + bool hlvx = access_info.flags.hlvx; + reg_t mode = (reg_t) access_info.effective_priv; reg_t paddr = walk(addr, type, mode, virt, hlvx) | (addr & (PGSIZE-1)); if (!pmp_ok(paddr, len, type, mode)) @@ -80,12 +71,13 @@ reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, xlate_flags_t xl tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) { + auto access_info = generate_access_info(vaddr, FETCH, {false, false, false}); check_triggers(triggers::OPERATION_EXECUTE, vaddr); tlb_entry_t result; reg_t vpn = vaddr >> PGSHIFT; if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) { - reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, {false, false, false}); + reg_t paddr = translate(access_info, sizeof(fetch_temp)); if (auto host_addr = sim->addr_to_mem(paddr)) { result = refill_tlb(vaddr, paddr, host_addr, FETCH); } else { @@ -207,7 +199,7 @@ void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, xlat return; } - reg_t paddr = translate(addr, len, LOAD, xlate_flags); + reg_t paddr = translate(generate_access_info(addr, LOAD, xlate_flags), len); if (xlate_flags.lr && !sim->reservable(paddr)) { throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0); @@ -263,7 +255,7 @@ void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* byte return; } - reg_t paddr = translate(addr, len, STORE, xlate_flags); + reg_t paddr = translate(generate_access_info(addr, STORE, xlate_flags), len); if (actually_store) { if (auto host_addr = sim->addr_to_mem(paddr)) { diff --git a/riscv/mmu.h b/riscv/mmu.h index 6e79539..1039de1 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -48,6 +48,14 @@ struct xlate_flags_t { } }; +struct mem_access_info_t { + const reg_t vaddr; + const reg_t effective_priv; + const bool effective_virt; + const xlate_flags_t flags; + const access_type type; +}; + void throw_access_exception(bool virt, reg_t addr, access_type type); // this class implements a processor's port into the virtual memory system. 
@@ -57,6 +65,26 @@ class mmu_t private: std::map alloc_cache; std::vector> addr_tbl; + + mem_access_info_t generate_access_info(reg_t addr, access_type type, xlate_flags_t xlate_flags) { + if (!proc) + return {addr, 0, false, {false, false, false}, type}; + bool virt = proc->state.v; + reg_t mode = proc->state.prv; + if (type != FETCH) { + if (in_mprv()) { + mode = get_field(proc->state.mstatus->read(), MSTATUS_MPP); + if (get_field(proc->state.mstatus->read(), MSTATUS_MPV) && mode != PRV_M) + virt = true; + } + if (xlate_flags.forced_virt) { + virt = true; + mode = get_field(proc->state.hstatus->read(), HSTATUS_SPVP); + } + } + return {addr, mode, virt, xlate_flags, type}; + } + public: mmu_t(simif_t* sim, endianness_t endianness, processor_t* proc); ~mmu_t(); @@ -182,7 +210,7 @@ public: void clean_inval(reg_t addr, bool clean, bool inval) { convert_load_traps_to_store_traps({ - const reg_t paddr = translate(addr, blocksz, LOAD, {false, false, false}) & ~(blocksz - 1); + const reg_t paddr = translate(generate_access_info(addr, LOAD, {false, false, false}), blocksz) & ~(blocksz - 1); if (sim->reservable(paddr)) { if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD)) tracer.clean_invalidate(paddr, blocksz, clean, inval); @@ -204,7 +232,7 @@ public: { if (vaddr & (size-1)) { // Raise either access fault or misaligned exception - store_slow_path(vaddr, size, nullptr, {false, false, false}, false, true); } - reg_t paddr = translate(vaddr, 1, STORE, {false, false, false}); + reg_t paddr = translate(generate_access_info(vaddr, STORE, {false, false, false}), 1); if (sim->reservable(paddr)) return load_reservation_address == paddr; else @@ -360,7 +388,7 @@ private: bool mmio(reg_t paddr, size_t len, uint8_t* bytes, access_type type); bool mmio_ok(reg_t paddr, access_type type); void check_triggers(triggers::operation_t operation, reg_t address, std::optional data = std::nullopt); - reg_t translate(reg_t addr, reg_t len, access_type type, xlate_flags_t xlate_flags); + reg_t translate(mem_access_info_t access_info, reg_t len); reg_t pte_load(reg_t pte_paddr, reg_t addr, bool virt, access_type trap_type, size_t ptesize) { if (ptesize == 4) -- cgit v1.1 From 87690a5ed4b55a3938efac098ce68fbf4d7fb037 Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Mon, 17 Apr 2023 19:19:01 -0700 Subject: Adjust load_slow_path_intrapage to receive a mem_access_info_t as input --- riscv/mmu.cc | 20 +++++++++++--------- riscv/mmu.h | 2 +- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index e2341fb..34cd170 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -190,18 +190,19 @@ void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, std:: } } -void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags) +void mmu_t::load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_t access_info) { + reg_t addr = access_info.vaddr; reg_t vpn = addr >> PGSHIFT; - if (!xlate_flags.is_special_access() && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { + if (!access_info.flags.is_special_access() && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr; memcpy(bytes, host_addr, len); return; } - reg_t paddr = translate(generate_access_info(addr, LOAD, xlate_flags), len); + reg_t paddr = translate(access_info, len); - if (xlate_flags.lr && !sim->reservable(paddr)) { + if (access_info.flags.lr && !sim->reservable(paddr)) { throw trap_load_access_fault((proc) ?
proc->state.v : false, addr, 0, 0); } @@ -209,24 +210,25 @@ void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, xlat memcpy(bytes, host_addr, len); if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD)) tracer.trace(paddr, len, LOAD); - else if (!xlate_flags.is_special_access()) + else if (!access_info.flags.is_special_access()) refill_tlb(addr, paddr, host_addr, LOAD); } else if (!mmio_load(paddr, len, bytes)) { throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0); } - if (xlate_flags.lr) { + if (access_info.flags.lr) { load_reservation_address = paddr; } } void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags) { + auto access_info = generate_access_info(addr, LOAD, xlate_flags); check_triggers(triggers::OPERATION_LOAD, addr); if ((addr & (len - 1)) == 0) { - load_slow_path_intrapage(addr, len, bytes, xlate_flags); + load_slow_path_intrapage(len, bytes, access_info); } else { bool gva = ((proc) ? proc->state.v : false) || xlate_flags.forced_virt; if (!is_misaligned_enabled()) @@ -236,9 +238,9 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t throw trap_load_access_fault(gva, addr, 0, 0); reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE); - load_slow_path_intrapage(addr, len_page0, bytes, xlate_flags); + load_slow_path_intrapage(len_page0, bytes, access_info); if (len_page0 != len) - load_slow_path_intrapage(addr + len_page0, len - len_page0, bytes + len_page0, xlate_flags); + load_slow_path_intrapage(len - len_page0, bytes + len_page0, generate_access_info(addr + len_page0, LOAD, xlate_flags)); } check_triggers(triggers::OPERATION_LOAD, addr, reg_from_bytes(len, bytes)); diff --git a/riscv/mmu.h b/riscv/mmu.h index 1039de1..1d38849 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -379,7 +379,7 @@ private: // handle uncommon cases: TLB misses, page faults, MMIO tlb_entry_t fetch_slow_path(reg_t addr); void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags); - void load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags); + void load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_t access_info); void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool require_alignment); void store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store); bool mmio_fetch(reg_t paddr, size_t len, uint8_t* bytes); -- cgit v1.1 From 9312137ae2a218632ec293ecc12da7c72fa828b2 Mon Sep 17 00:00:00 2001 From: rbuchner Date: Mon, 24 Apr 2023 16:47:28 -0700 Subject: Use access_info.effective_virt when access_fault due to non-reservable lr Fixes case 4 from https://github.com/riscv-software-src/riscv-isa-sim/issues/872 --- riscv/mmu.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 34cd170..acbf652 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -203,7 +203,7 @@ void mmu_t::load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_ reg_t paddr = translate(access_info, len); if (access_info.flags.lr && !sim->reservable(paddr)) { - throw trap_load_access_fault((proc) ? 
proc->state.v : false, addr, 0, 0); + throw trap_load_access_fault(access_info.effective_virt, addr, 0, 0); } if (auto host_addr = sim->addr_to_mem(paddr)) { -- cgit v1.1 From bd675766091549e4fc1607f6106b0dce7dc03d21 Mon Sep 17 00:00:00 2001 From: rbuchner Date: Mon, 24 Apr 2023 16:59:34 -0700 Subject: Use access_info.effective_virt when failed mmio_load (i.e. device detects access fault) Fixes case 3 from https://github.com/riscv-software-src/riscv-isa-sim/issues/872 --- riscv/mmu.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index acbf652..db6c31e 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -214,7 +214,7 @@ void mmu_t::load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_ refill_tlb(addr, paddr, host_addr, LOAD); } else if (!mmio_load(paddr, len, bytes)) { - throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0); + throw trap_load_access_fault(access_info.effective_virt, addr, 0, 0); } if (access_info.flags.lr) { -- cgit v1.1 From 2745d3139cefd1fc2b97bb9382188c59f15eced9 Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Mon, 17 Apr 2023 20:22:59 -0700 Subject: Use access_info within load_slow_path rather than xlate_flags Fixes case 2 from https://github.com/riscv-software-src/riscv-isa-sim/issues/872 --- riscv/mmu.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index db6c31e..be986fe 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -230,11 +230,11 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t if ((addr & (len - 1)) == 0) { load_slow_path_intrapage(len, bytes, access_info); } else { - bool gva = ((proc) ? proc->state.v : false) || xlate_flags.forced_virt; + bool gva = access_info.effective_virt; if (!is_misaligned_enabled()) throw trap_load_address_misaligned(gva, addr, 0, 0); - if (xlate_flags.lr) + if (access_info.flags.lr) throw trap_load_access_fault(gva, addr, 0, 0); reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE); -- cgit v1.1 From 125c4d6a6400eef6365d8379efef1330c429f64e Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Mon, 17 Apr 2023 20:33:46 -0700 Subject: Adjust store_slow_path_intrapage to receive a mem_access_info_t as input --- riscv/mmu.cc | 16 +++++++++------- riscv/mmu.h | 2 +- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index be986fe..cf77325 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -246,10 +246,11 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t check_triggers(triggers::OPERATION_LOAD, addr, reg_from_bytes(len, bytes)); } -void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store) +void mmu_t::store_slow_path_intrapage(reg_t len, const uint8_t* bytes, mem_access_info_t access_info, bool actually_store) { + reg_t addr = access_info.vaddr; reg_t vpn = addr >> PGSHIFT; - if (!xlate_flags.is_special_access() && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { + if (!access_info.flags.is_special_access() && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) { if (actually_store) { auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr; memcpy(host_addr, bytes, len); @@ -257,14 +258,14 @@ void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* byte return; } - reg_t paddr = translate(generate_access_info(addr, STORE, xlate_flags), len); + reg_t paddr = translate(access_info,
len); if (actually_store) { if (auto host_addr = sim->addr_to_mem(paddr)) { memcpy(host_addr, bytes, len); if (tracer.interested_in_range(paddr, paddr + PGSIZE, STORE)) tracer.trace(paddr, len, STORE); - else if (!xlate_flags.is_special_access()) + else if (!access_info.flags.is_special_access()) refill_tlb(addr, paddr, host_addr, STORE); } else if (!mmio_store(paddr, len, bytes)) { throw trap_store_access_fault((proc) ? proc->state.v : false, addr, 0, 0); @@ -274,6 +275,7 @@ void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* byte void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool UNUSED require_alignment) { + auto access_info = generate_access_info(addr, STORE, xlate_flags); if (actually_store) check_triggers(triggers::OPERATION_STORE, addr, reg_from_bytes(len, bytes)); @@ -286,11 +288,11 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_f throw trap_store_access_fault(gva, addr, 0, 0); reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE); - store_slow_path_intrapage(addr, len_page0, bytes, xlate_flags, actually_store); + store_slow_path_intrapage(len_page0, bytes, access_info, actually_store); if (len_page0 != len) - store_slow_path_intrapage(addr + len_page0, len - len_page0, bytes + len_page0, xlate_flags, actually_store); + store_slow_path_intrapage(len - len_page0, bytes + len_page0, generate_access_info(addr + len_page0, STORE, xlate_flags), actually_store); } else { - store_slow_path_intrapage(addr, len, bytes, xlate_flags, actually_store); + store_slow_path_intrapage(len, bytes, access_info, actually_store); } } diff --git a/riscv/mmu.h b/riscv/mmu.h index 1d38849..41f6751 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -381,7 +381,7 @@ private: void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags); void load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_t access_info); void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store, bool require_alignment); - void store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, xlate_flags_t xlate_flags, bool actually_store); + void store_slow_path_intrapage(reg_t len, const uint8_t* bytes, mem_access_info_t access_info, bool actually_store); bool mmio_fetch(reg_t paddr, size_t len, uint8_t* bytes); bool mmio_load(reg_t paddr, size_t len, uint8_t* bytes); bool mmio_store(reg_t paddr, size_t len, const uint8_t* bytes); -- cgit v1.1 From 850600792ec04756f7720a9b376cfb2d8ad6c917 Mon Sep 17 00:00:00 2001 From: rbuchner Date: Mon, 1 May 2023 12:42:28 -0700 Subject: Use access_info.effective_virt when failed mmio_store (i.e. device detects access fault) Fixes case 3 from https://github.com/riscv-software-src/riscv-isa-sim/issues/872 --- riscv/mmu.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index cf77325..a524035 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -268,7 +268,7 @@ void mmu_t::store_slow_path_intrapage(reg_t len, const uint8_t* bytes, mem_acces else if (!access_info.flags.is_special_access()) refill_tlb(addr, paddr, host_addr, STORE); } else if (!mmio_store(paddr, len, bytes)) { - throw trap_store_access_fault((proc) ? 
proc->state.v : false, addr, 0, 0); + throw trap_store_access_fault(access_info.effective_virt, addr, 0, 0); } } } -- cgit v1.1 From f7900e4730e1c13fa42789bc01d8f0366756130e Mon Sep 17 00:00:00 2001 From: Ryan Buchner Date: Mon, 17 Apr 2023 20:34:29 -0700 Subject: Use access_info within store_slow_path rather than xlate_flags --- riscv/mmu.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index a524035..7264ea8 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -280,7 +280,7 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_f check_triggers(triggers::OPERATION_STORE, addr, reg_from_bytes(len, bytes)); if (addr & (len - 1)) { - bool gva = ((proc) ? proc->state.v : false) || xlate_flags.forced_virt; + bool gva = access_info.effective_virt; if (!is_misaligned_enabled()) throw trap_store_address_misaligned(gva, addr, 0, 0); -- cgit v1.1 From 4b9996bad9a3327b13056f21b7b2e03fdc41f65a Mon Sep 17 00:00:00 2001 From: rbuchner Date: Tue, 18 Apr 2023 14:06:12 -0700 Subject: Pass mem_access_info_t into walk() --- riscv/mmu.cc | 10 +++++++--- riscv/mmu.h | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 7264ea8..f40ce30 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -60,10 +60,9 @@ reg_t mmu_t::translate(mem_access_info_t access_info, reg_t len) return addr; bool virt = access_info.effective_virt; - bool hlvx = access_info.flags.hlvx; reg_t mode = (reg_t) access_info.effective_priv; - reg_t paddr = walk(addr, type, mode, virt, hlvx) | (addr & (PGSIZE-1)); + reg_t paddr = walk(access_info) | (addr & (PGSIZE-1)); if (!pmp_ok(paddr, len, type, mode)) throw_access_exception(virt, addr, type); return paddr; @@ -461,8 +460,13 @@ reg_t mmu_t::s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_ty } } -reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode, bool virt, bool hlvx) +reg_t mmu_t::walk(mem_access_info_t access_info) { + access_type type = access_info.type; + reg_t addr = access_info.vaddr; + bool virt = access_info.effective_virt; + bool hlvx = access_info.flags.hlvx; + reg_t mode = access_info.effective_priv; reg_t page_mask = (reg_t(1) << PGSHIFT) - 1; reg_t satp = proc->get_state()->satp->readvirt(virt); vm_info vm = decode_vm_info(proc->get_const_xlen(), false, mode, satp); diff --git a/riscv/mmu.h b/riscv/mmu.h index 41f6751..7d14ad5 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -374,7 +374,7 @@ private: reg_t s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_type, bool virt, bool hlvx); // perform a page table walk for a given VA; set referenced/dirty bits - reg_t walk(reg_t addr, access_type type, reg_t prv, bool virt, bool hlvx); + reg_t walk(mem_access_info_t access_info); // handle uncommon cases: TLB misses, page faults, MMIO tlb_entry_t fetch_slow_path(reg_t addr); -- cgit v1.1 From 36b8c12e9f4d92f3cb97daf4ea0613436724438f Mon Sep 17 00:00:00 2001 From: rbuchner Date: Wed, 19 Apr 2023 16:20:56 -0700 Subject: Add split_misaligned_access() to mem_access_info_t --- riscv/mmu.cc | 4 ++-- riscv/mmu.h | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/riscv/mmu.cc b/riscv/mmu.cc index f40ce30..734e8cd 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -239,7 +239,7 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE); load_slow_path_intrapage(len_page0, bytes, access_info); if (len_page0 != len) - load_slow_path_intrapage(len - 
len_page0, bytes + len_page0, generate_access_info(addr + len_page0, LOAD, xlate_flags)); + load_slow_path_intrapage(len - len_page0, bytes + len_page0, access_info.split_misaligned_access(len_page0)); } check_triggers(triggers::OPERATION_LOAD, addr, reg_from_bytes(len, bytes)); @@ -289,7 +289,7 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_f reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE); store_slow_path_intrapage(len_page0, bytes, access_info, actually_store); if (len_page0 != len) - store_slow_path_intrapage(len - len_page0, bytes + len_page0, generate_access_info(addr + len_page0, STORE, xlate_flags), actually_store); + store_slow_path_intrapage(len - len_page0, bytes + len_page0, access_info.split_misaligned_access(len_page0), actually_store); } else { store_slow_path_intrapage(len, bytes, access_info, actually_store); } diff --git a/riscv/mmu.h b/riscv/mmu.h index 7d14ad5..2f93863 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -54,6 +54,10 @@ struct mem_access_info_t { const bool effective_virt; const xlate_flags_t flags; const access_type type; + + mem_access_info_t split_misaligned_access(reg_t offset) const { + return {vaddr + offset, effective_priv, effective_virt, flags, type}; + } }; void throw_access_exception(bool virt, reg_t addr, access_type type); -- cgit v1.1 From 33fbc2df39df914d3462bede4112db7966d49a3c Mon Sep 17 00:00:00 2001 From: rbuchner Date: Fri, 28 Apr 2023 16:28:16 -0700 Subject: Plumb in effective virtual bit to take_trigger_action() --- riscv/execute.cc | 6 +++--- riscv/mmu.cc | 16 ++++++++-------- riscv/mmu.h | 2 +- riscv/processor.cc | 2 +- riscv/processor.h | 2 +- riscv/triggers.h | 5 +++-- 6 files changed, 17 insertions(+), 16 deletions(-) diff --git a/riscv/execute.cc b/riscv/execute.cc index acf0e90..295879d 100644 --- a/riscv/execute.cc +++ b/riscv/execute.cc @@ -267,7 +267,7 @@ void processor_t::step(size_t n) auto match = TM.detect_icount_match(); if (match.has_value()) { assert(match->timing == triggers::TIMING_BEFORE); - throw triggers::matched_t((triggers::operation_t)0, 0, match->action); + throw triggers::matched_t((triggers::operation_t)0, 0, match->action, state.v); } } @@ -310,7 +310,7 @@ void processor_t::step(size_t n) // Trigger action takes priority over single step auto match = TM.detect_trap_match(t); if (match.has_value()) - take_trigger_action(match->action, 0, state.pc); + take_trigger_action(match->action, 0, state.pc, 0); else if (unlikely(state.single_step == state.STEP_STEPPED)) { state.single_step = state.STEP_NONE; enter_debug_mode(DCSR_CAUSE_STEP); @@ -322,7 +322,7 @@ void processor_t::step(size_t n) delete mmu->matched_trigger; mmu->matched_trigger = NULL; } - take_trigger_action(t.action, t.address, pc); + take_trigger_action(t.action, t.address, pc, t.gva); } catch(trap_debug_mode&) { diff --git a/riscv/mmu.cc b/riscv/mmu.cc index 734e8cd..358ccd3 100644 --- a/riscv/mmu.cc +++ b/riscv/mmu.cc @@ -71,7 +71,7 @@ reg_t mmu_t::translate(mem_access_info_t access_info, reg_t len) tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) { auto access_info = generate_access_info(vaddr, FETCH, {false, false, false}); - check_triggers(triggers::OPERATION_EXECUTE, vaddr); + check_triggers(triggers::OPERATION_EXECUTE, vaddr, access_info.effective_virt); tlb_entry_t result; reg_t vpn = vaddr >> PGSHIFT; @@ -88,7 +88,7 @@ tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) result = tlb_data[vpn % TLB_ENTRIES]; } - check_triggers(triggers::OPERATION_EXECUTE, vaddr, from_le(*(const uint16_t*)(result.host_offset 
+ vaddr))); + check_triggers(triggers::OPERATION_EXECUTE, vaddr, access_info.effective_virt, from_le(*(const uint16_t*)(result.host_offset + vaddr))); return result; } @@ -169,7 +169,7 @@ bool mmu_t::mmio(reg_t paddr, size_t len, uint8_t* bytes, access_type type) return true; } -void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, std::optional data) +void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool virt, std::optional data) { if (matched_trigger || !proc) return; @@ -179,13 +179,13 @@ void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, std:: if (match.has_value()) switch (match->timing) { case triggers::TIMING_BEFORE: - throw triggers::matched_t(operation, address, match->action); + throw triggers::matched_t(operation, address, match->action, virt); case triggers::TIMING_AFTER: // We want to take this exception on the next instruction. We check // whether to do so in the I$ refill path, so flush the I$. flush_icache(); - matched_trigger = new triggers::matched_t(operation, address, match->action); + matched_trigger = new triggers::matched_t(operation, address, match->action, virt); } } @@ -224,7 +224,7 @@ void mmu_t::load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t xlate_flags) { auto access_info = generate_access_info(addr, LOAD, xlate_flags); - check_triggers(triggers::OPERATION_LOAD, addr); + check_triggers(triggers::OPERATION_LOAD, addr, access_info.effective_virt); if ((addr & (len - 1)) == 0) { load_slow_path_intrapage(len, bytes, access_info); @@ -242,7 +242,7 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, xlate_flags_t load_slow_path_intrapage(len - len_page0, bytes + len_page0, access_info.split_misaligned_access(len_page0)); } - check_triggers(triggers::OPERATION_LOAD, addr, reg_from_bytes(len, bytes)); + check_triggers(triggers::OPERATION_LOAD, addr, access_info.effective_virt, reg_from_bytes(len, bytes)); } void mmu_t::store_slow_path_intrapage(reg_t len, const uint8_t* bytes, mem_access_info_t access_info, bool actually_store) @@ -276,7 +276,7 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, xlate_f { auto access_info = generate_access_info(addr, STORE, xlate_flags); if (actually_store) - check_triggers(triggers::OPERATION_STORE, addr, reg_from_bytes(len, bytes)); + check_triggers(triggers::OPERATION_STORE, addr, access_info.effective_virt, reg_from_bytes(len, bytes)); if (addr & (len - 1)) { bool gva = access_info.effective_virt; diff --git a/riscv/mmu.h b/riscv/mmu.h index 2f93863..5a4835c 100644 --- a/riscv/mmu.h +++ b/riscv/mmu.h @@ -391,7 +391,7 @@ private: bool mmio_store(reg_t paddr, size_t len, const uint8_t* bytes); bool mmio(reg_t paddr, size_t len, uint8_t* bytes, access_type type); bool mmio_ok(reg_t paddr, access_type type); - void check_triggers(triggers::operation_t operation, reg_t address, std::optional data = std::nullopt); + void check_triggers(triggers::operation_t operation, reg_t address, bool virt, std::optional data = std::nullopt); reg_t translate(mem_access_info_t access_info, reg_t len); reg_t pte_load(reg_t pte_paddr, reg_t addr, bool virt, access_type trap_type, size_t ptesize) { diff --git a/riscv/processor.cc b/riscv/processor.cc index 330bd30..0ccb651 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -885,7 +885,7 @@ void processor_t::take_trap(trap_t& t, reg_t epc) } } -void 
processor_t::take_trigger_action(triggers::action_t action, reg_t breakpoint_tval, reg_t epc, bool virt) { if (debug) { std::stringstream s; // first put everything in a string, later send it to output diff --git a/riscv/processor.h b/riscv/processor.h index 8117568..1b74cc2 100644 --- a/riscv/processor.h +++ b/riscv/processor.h @@ -331,7 +331,7 @@ private: void take_pending_interrupt() { take_interrupt(state.mip->read() & state.mie->read()); } void take_interrupt(reg_t mask); // take first enabled interrupt in mask void take_trap(trap_t& t, reg_t epc); // take an exception - void take_trigger_action(triggers::action_t action, reg_t breakpoint_tval, reg_t epc); + void take_trigger_action(triggers::action_t action, reg_t breakpoint_tval, reg_t epc, bool virt); void disasm(insn_t insn); // disassemble and print an instruction int paddr_bits(); diff --git a/riscv/triggers.h b/riscv/triggers.h index 6e3d74d..aeda4d5 100644 --- a/riscv/triggers.h +++ b/riscv/triggers.h @@ -54,12 +54,13 @@ struct match_result_t { class matched_t { public: - matched_t(triggers::operation_t operation, reg_t address, action_t action) : - operation(operation), address(address), action(action) {} + matched_t(triggers::operation_t operation, reg_t address, action_t action, bool gva) : - operation(operation), address(address), action(action), gva(gva) {} triggers::operation_t operation; reg_t address; action_t action; + bool gva; }; class trigger_t { -- cgit v1.1 From a30a0d63677151cc688fa4e0a05ac664e63d94f4 Mon Sep 17 00:00:00 2001 From: rbuchner Date: Mon, 1 May 2023 09:13:47 -0700 Subject: Use passed-in virtual bit for creating traps in take_trigger_action() rather than state.v Fixes case 1 from https://github.com/riscv-software-src/riscv-isa-sim/issues/872 --- riscv/processor.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/riscv/processor.cc b/riscv/processor.cc index 0ccb651..74a0b8f 100644 --- a/riscv/processor.cc +++ b/riscv/processor.cc @@ -899,7 +899,7 @@ void processor_t::take_trigger_action(triggers::action_t action, reg_t breakpoin enter_debug_mode(DCSR_CAUSE_HWBP); break; case triggers::ACTION_DEBUG_EXCEPTION: { - trap_breakpoint trap(state.v, breakpoint_tval); + trap_breakpoint trap(virt, breakpoint_tval); take_trap(trap, epc); break; } -- cgit v1.1
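With the full series applied, the RISCV_XLATE_* bitmask is gone: special accesses name their flag bits in xlate_flags_t, and fault sites report gva from access_info.effective_virt rather than re-reading proc->state.v. A minimal usage sketch of the resulting interface (the wrapper below is hypothetical, assuming the public mmu_t members shown in the diffs above):

    // Hypothetical helper: an LR load expressed against the final API.
    // On the slow path, spike verifies sim->reservable(paddr) and raises
    // a load access fault carrying the effective virtualization bit.
    template <typename T>
    T example_load_reserved(mmu_t& mmu, reg_t addr) {
      xlate_flags_t flags = {/*forced_virt=*/false, /*hlvx=*/false, /*lr=*/true};
      return mmu.load<T>(addr, flags);
    }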