author | Andrew Waterman <andrew@sifive.com> | 2025-01-15 19:21:42 -0800
---|---|---
committer | Andrew Waterman <andrew@sifive.com> | 2025-01-15 19:21:42 -0800
commit | 91e5d8745e55122989d6e091b27c48136ca01deb (patch) |
tree | 1770045f94e90e1962724efad54e876a45b639ee |
parent | f289ad5be80e7e93eaff0d7265613a673e2e948f (diff) |
download | riscv-isa-sim-sanitize.zip riscv-isa-sim-sanitize.tar.gz riscv-isa-sim-sanitize.tar.bz2 |
separate TLBs (sanitize)
-rw-r--r-- | riscv/mmu.cc | 52
-rw-r--r-- | riscv/mmu.h | 24
2 files changed, 39 insertions(+), 37 deletions(-)
```diff
diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index 65a40db..7679610 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -36,9 +36,9 @@ void mmu_t::flush_icache()
 
 void mmu_t::flush_tlb()
 {
-  memset(tlb_insn_tag, -1, sizeof(tlb_insn_tag));
-  memset(tlb_load_tag, -1, sizeof(tlb_load_tag));
-  memset(tlb_store_tag, -1, sizeof(tlb_store_tag));
+  memset(tlb_insn, -1, sizeof(tlb_insn));
+  memset(tlb_load, -1, sizeof(tlb_load));
+  memset(tlb_store, -1, sizeof(tlb_store));
 
   flush_icache();
 }
@@ -76,7 +76,7 @@ tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
 
   tlb_entry_t result;
   reg_t vpn = vaddr >> PGSHIFT;
-  if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) {
+  if (unlikely(tlb_insn[vpn % TLB_ENTRIES].tag != (vpn | TLB_CHECK_TRIGGERS))) {
     reg_t paddr = translate(access_info, sizeof(fetch_temp[0]));
     if (auto host_addr = sim->addr_to_mem(paddr)) {
       result = refill_tlb(vaddr, paddr, host_addr, FETCH);
@@ -86,7 +86,7 @@ tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
       result = {uintptr_t(&fetch_temp), paddr - (vaddr & (PGSIZE-1))};
     }
   } else {
-    result = tlb_data[vpn % TLB_ENTRIES];
+    result = tlb_insn[vpn % TLB_ENTRIES].data;
   }
 
   check_triggers(triggers::OPERATION_EXECUTE, vaddr, access_info.effective_virt, from_le(*(const uint16_t*)(result.host_addr + (vaddr & (PGSIZE-1)))));
@@ -195,8 +195,8 @@ void mmu_t::load_slow_path_intrapage(reg_t len, uint8_t* bytes, mem_access_info_
   reg_t addr = access_info.vaddr;
   reg_t transformed_addr = access_info.transformed_vaddr;
   reg_t vpn = transformed_addr >> PGSHIFT;
-  if (!access_info.flags.is_special_access() && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
-    auto host_addr = (const void*)(tlb_data[vpn % TLB_ENTRIES].host_addr + (transformed_addr & (PGSIZE-1)));
+  if (!access_info.flags.is_special_access() && vpn == (tlb_load[vpn % TLB_ENTRIES].tag & ~TLB_CHECK_TRIGGERS)) {
+    auto host_addr = (const void*)(tlb_load[vpn % TLB_ENTRIES].data.host_addr + (transformed_addr & (PGSIZE-1)));
     memcpy(bytes, host_addr, len);
     return;
   }
@@ -258,9 +258,9 @@ void mmu_t::store_slow_path_intrapage(reg_t len, const uint8_t* bytes, mem_acces
   reg_t addr = access_info.vaddr;
   reg_t transformed_addr = access_info.transformed_vaddr;
   reg_t vpn = transformed_addr >> PGSHIFT;
-  if (!access_info.flags.is_special_access() && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
+  if (!access_info.flags.is_special_access() && vpn == (tlb_store[vpn % TLB_ENTRIES].tag & ~TLB_CHECK_TRIGGERS)) {
     if (actually_store) {
-      auto host_addr = (void*)(tlb_data[vpn % TLB_ENTRIES].host_addr + (transformed_addr & (PGSIZE-1)));
+      auto host_addr = (void*)(tlb_store[vpn % TLB_ENTRIES].data.host_addr + (transformed_addr & (PGSIZE-1)));
       memcpy(host_addr, bytes, len);
     }
     return;
@@ -320,28 +320,26 @@ tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_
   tlb_entry_t entry = {uintptr_t(host_addr) - (vaddr & (PGSIZE-1)), paddr - (vaddr & (PGSIZE-1))};
 
-  if (in_mprv())
+  if (in_mprv() || !pmp_homogeneous(paddr & ~reg_t(PGSIZE - 1), PGSIZE))
     return entry;
 
-  if ((tlb_load_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
-    tlb_load_tag[idx] = -1;
-  if ((tlb_store_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
-    tlb_store_tag[idx] = -1;
-  if ((tlb_insn_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag)
-    tlb_insn_tag[idx] = -1;
-
-  if ((check_triggers_fetch && type == FETCH) ||
-      (check_triggers_load && type == LOAD) ||
-      (check_triggers_store && type == STORE))
-    expected_tag |= TLB_CHECK_TRIGGERS;
-
-  if (pmp_homogeneous(paddr & ~reg_t(PGSIZE - 1), PGSIZE)) {
-    if (type == FETCH) tlb_insn_tag[idx] = expected_tag;
-    else if (type == STORE) tlb_store_tag[idx] = expected_tag;
-    else tlb_load_tag[idx] = expected_tag;
+  switch (type) {
+    case FETCH:
+      tlb_insn[idx].data = entry;
+      tlb_insn[idx].tag = expected_tag | (check_triggers_fetch ? TLB_CHECK_TRIGGERS : 0);
+      break;
+    case LOAD:
+      tlb_load[idx].data = entry;
+      tlb_load[idx].tag = expected_tag | (check_triggers_load ? TLB_CHECK_TRIGGERS : 0);
+      break;
+    case STORE:
+      tlb_store[idx].data = entry;
+      tlb_store[idx].tag = expected_tag | (check_triggers_store ? TLB_CHECK_TRIGGERS : 0);
+      break;
+    default:
+      abort();
   }
 
-  tlb_data[idx] = entry;
   return entry;
 }
```
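The restructuring is easier to see outside the diff. In the old layout, three parallel tag arrays all indexed one shared `tlb_data`, which is why `refill_tlb` had to invalidate the other access types' tags whenever it repurposed a slot; with the `dtlb_entry_t` introduced in `mmu.h` below, each access type owns a complete tag-plus-translation entry and a refill touches only its own TLB. Here is a minimal standalone model of that scheme, not Spike's code: all `toy_*` names are invented for illustration, and the host-address bookkeeping is reduced to a single offset.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

using reg_t = uint64_t;

constexpr std::size_t PGSHIFT = 12;
constexpr std::size_t TLB_ENTRIES = 256;
// Tag bit 63: the slow path must re-run debug-trigger checks for this page.
constexpr reg_t TLB_CHECK_TRIGGERS = reg_t(1) << 63;

// Counterpart of the patch's dtlb_entry_t: tag and translation live together,
// so one index computation reaches both. The translation is reduced to a
// single offset here; the real tlb_entry_t carries host_addr and target_addr.
struct toy_entry_t {
  uintptr_t host_offset;
  reg_t tag;
};

struct toy_tlb_t {
  toy_entry_t entries[TLB_ENTRIES];

  void flush() {
    // memset(-1) still invalidates everything: a 64-bit vaddr yields at most
    // a 52-bit VPN, so an all-ones tag can match neither vpn nor
    // vpn | TLB_CHECK_TRIGGERS.
    std::memset(entries, -1, sizeof(entries));
  }

  void refill(reg_t vaddr, uintptr_t host_offset, bool check_triggers) {
    reg_t vpn = vaddr >> PGSHIFT;
    toy_entry_t& e = entries[vpn % TLB_ENTRIES];
    e.host_offset = host_offset;
    e.tag = vpn | (check_triggers ? TLB_CHECK_TRIGGERS : 0);
  }

  // Fast-path hit test: a trigger-tagged entry deliberately fails the exact
  // compare, diverting the access to the slow path.
  bool hit(reg_t vaddr) const {
    reg_t vpn = vaddr >> PGSHIFT;
    return entries[vpn % TLB_ENTRIES].tag == vpn;
  }
};

int main() {
  // One TLB per access type, as in the commit; no shared data array.
  toy_tlb_t load_tlb, store_tlb, insn_tlb;
  load_tlb.flush(); store_tlb.flush(); insn_tlb.flush();

  load_tlb.refill(0x1000, 0x10000, /*check_triggers=*/false);
  store_tlb.refill(0x1000, 0x10000, /*check_triggers=*/true);

  assert(load_tlb.hit(0x1000));   // plain entry: fast-path hit
  assert(!store_tlb.hit(0x1000)); // trigger-tagged: forced slow path
  assert(!insn_tlb.hit(0x1000));  // refills no longer alias across types
  return 0;
}
```

Note also the hoisted guard in `refill_tlb`: a page that is not PMP-homogeneous now bails out before any TLB state is written, whereas the old code merely skipped the tag update but still overwrote the shared `tlb_data` slot.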
```diff
diff --git a/riscv/mmu.h b/riscv/mmu.h
index 1cc83d6..2481d81 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -38,6 +38,11 @@ struct tlb_entry_t {
   reg_t target_addr;
 };
 
+struct dtlb_entry_t {
+  tlb_entry_t data;
+  reg_t tag;
+};
+
 struct xlate_flags_t {
   const bool forced_virt : 1 {false};
   const bool hlvx : 1 {false};
@@ -82,10 +87,10 @@ public:
     target_endian<T> res;
     reg_t vpn = addr >> PGSHIFT;
     bool aligned = (addr & (sizeof(T) - 1)) == 0;
-    bool tlb_hit = tlb_load_tag[vpn % TLB_ENTRIES] == vpn;
+    bool tlb_hit = tlb_load[vpn % TLB_ENTRIES].tag == vpn;
 
     if (likely(!xlate_flags.is_special_access() && aligned && tlb_hit)) {
-      res = *(target_endian<T>*)(tlb_data[vpn % TLB_ENTRIES].host_addr + (addr & (PGSIZE-1)));
+      res = *(target_endian<T>*)(tlb_load[vpn % TLB_ENTRIES].data.host_addr + (addr & (PGSIZE-1)));
     } else {
       load_slow_path(addr, sizeof(T), (uint8_t*)&res, xlate_flags);
     }
@@ -123,10 +128,10 @@ public:
   void ALWAYS_INLINE store(reg_t addr, T val, xlate_flags_t xlate_flags = {}) {
     reg_t vpn = addr >> PGSHIFT;
     bool aligned = (addr & (sizeof(T) - 1)) == 0;
-    bool tlb_hit = tlb_store_tag[vpn % TLB_ENTRIES] == vpn;
+    bool tlb_hit = tlb_store[vpn % TLB_ENTRIES].tag == vpn;
 
     if (!xlate_flags.is_special_access() && likely(aligned && tlb_hit)) {
-      *(target_endian<T>*)(tlb_data[vpn % TLB_ENTRIES].host_addr + (addr & (PGSIZE-1))) = to_target(val);
+      *(target_endian<T>*)(tlb_store[vpn % TLB_ENTRIES].data.host_addr + (addr & (PGSIZE-1))) = to_target(val);
     } else {
       target_endian<T> target_val = to_target(val);
       store_slow_path(addr, sizeof(T), (const uint8_t*)&target_val, xlate_flags, true, false);
@@ -387,10 +392,9 @@ private:
   // If a TLB tag has TLB_CHECK_TRIGGERS set, then the MMU must check for a
   // trigger match before completing an access.
   static const reg_t TLB_CHECK_TRIGGERS = reg_t(1) << 63;
-  tlb_entry_t tlb_data[TLB_ENTRIES];
-  reg_t tlb_insn_tag[TLB_ENTRIES];
-  reg_t tlb_load_tag[TLB_ENTRIES];
-  reg_t tlb_store_tag[TLB_ENTRIES];
+  dtlb_entry_t tlb_load[TLB_ENTRIES];
+  dtlb_entry_t tlb_store[TLB_ENTRIES];
+  dtlb_entry_t tlb_insn[TLB_ENTRIES];
 
   // temporary location to store instructions fetched from an MMIO region
   uint16_t fetch_temp[PGSIZE / sizeof(uint16_t)];
@@ -472,8 +476,8 @@ private:
   // ITLB lookup
   inline tlb_entry_t translate_insn_addr(reg_t addr) {
     reg_t vpn = addr >> PGSHIFT;
-    if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn))
-      return tlb_data[vpn % TLB_ENTRIES];
+    if (likely(tlb_insn[vpn % TLB_ENTRIES].tag == vpn))
+      return tlb_insn[vpn % TLB_ENTRIES].data;
     return fetch_slow_path(addr);
   }
```
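Even after the restructuring, the commit uses three distinct hit predicates, and the asymmetry is easy to miss when reading the hunks above. The standalone check below works through all three; only `TLB_CHECK_TRIGGERS` and the shapes of the compares come from the patch itself, the rest is illustration.

```cpp
#include <cassert>
#include <cstdint>

using reg_t = uint64_t;
constexpr reg_t TLB_CHECK_TRIGGERS = reg_t(1) << 63;

int main() {
  reg_t vpn = 0x1234;

  // Entry that requires trigger checks: the flag bit is folded into the tag.
  reg_t tagged = vpn | TLB_CHECK_TRIGGERS;
  assert(tagged != vpn);                          // fast path (load()/store()): miss
  assert(vpn == (tagged & ~TLB_CHECK_TRIGGERS));  // load/store slow path: hit after masking
  assert(tagged == (vpn | TLB_CHECK_TRIGGERS));   // fetch_slow_path probe: hit

  // Plain entry: hits the fast path, so fetch_slow_path's probe -- which only
  // matches trigger-tagged entries -- correctly misses. A plain ITLB entry
  // would never have reached the slow path in the first place.
  reg_t plain = vpn;
  assert(plain == vpn);
  assert(plain != (vpn | TLB_CHECK_TRIGGERS));
  return 0;
}
```

The masking in the load/store slow paths is what lets a trigger-tagged entry still count as a translation hit once the slow path has had its chance to evaluate the triggers, while the exact compare in the inline fast path is what forces those entries off it in the first place.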