author    Andrew Waterman <andrew@sifive.com>  2022-10-05 18:38:02 -0700
committer Andrew Waterman <andrew@sifive.com>  2022-10-06 19:30:40 -0700
commit    6311f7513aa150797f69ecac906978bb9e9fecbd (patch)
tree      36d172a7811a0a68858e0ce0c2ee0ae921b3c2b9 /riscv/mmu.cc
parent    749ead90a5c254ca23d54ef3e0669e51df127a5d (diff)
Move all uncommon-case store functionality into store_slow_path
As a side effect, misaligned stores now behave the same as aligned stores with respect to triggers: only the first byte is checked.
Diffstat (limited to 'riscv/mmu.cc')
-rw-r--r--  riscv/mmu.cc | 43
1 file changed, 35 insertions(+), 8 deletions(-)
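
The trigger note in the commit message falls out of the ordering in the new store_slow_path below: the store trigger is evaluated once, on the full access, before any misalignment handling, so a misaligned store traps exactly where an aligned one would. A minimal sketch of that control flow, using hypothetical helpers (check_store_trigger, raise_trigger_trap) rather than Spike's actual trigger API:

    #include <cstdint>
    #include <cstring>

    using reg_t = uint64_t;

    // Hypothetical stand-ins for Spike's trigger machinery.
    bool check_store_trigger(reg_t addr, reg_t data);
    [[noreturn]] void raise_trigger_trap(reg_t addr);

    void store(reg_t addr, reg_t len, const uint8_t* bytes)
    {
      reg_t data = 0;
      std::memcpy(&data, bytes, len);        // stands in for reg_from_bytes(); assumes len <= 8

      if (check_store_trigger(addr, data))   // one check, before any splitting
        raise_trigger_trap(addr);

      if (addr & (len - 1)) {
        // misaligned path: split the store; no further trigger checks
      } else {
        // aligned path: single store
      }
    }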
diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index 49889df..9c9a6c5 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -194,15 +194,13 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate
   }
 }
 
-void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store)
+void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store)
 {
-  if (actually_store) {
-    if (!matched_trigger) {
-      reg_t data = reg_from_bytes(len, bytes);
-      matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data);
-      if (matched_trigger)
-        throw *matched_trigger;
-    }
+  reg_t vpn = addr >> PGSHIFT;
+  if (xlate_flags == 0 && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
+    auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr;
+    memcpy(host_addr, bytes, len);
+    return;
   }
 
   reg_t paddr = translate(addr, len, STORE, xlate_flags);
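
The fast path added above is a direct-mapped TLB probe: the entry at slot vpn % TLB_ENTRIES hits when its tag equals the VPN after the TLB_CHECK_TRIGGERS bit is masked off, and masking is safe here because the caller has already run the trigger check. An illustrative sketch of that probe, with assumed values for the constants (Spike defines its own PGSHIFT, TLB_ENTRIES, and TLB_CHECK_TRIGGERS):

    #include <cstddef>
    #include <cstdint>

    using reg_t = uint64_t;

    constexpr int PGSHIFT = 12;                           // assumes 4 KiB pages
    constexpr size_t TLB_ENTRIES = 256;                   // assumed; a power of two, so % is a mask
    constexpr reg_t TLB_CHECK_TRIGGERS = reg_t(1) << 63;  // assumed flag bit

    reg_t tlb_store_tag[TLB_ENTRIES];

    // True when the page containing vaddr has a cached store mapping.
    bool tlb_store_hit(reg_t vaddr)
    {
      reg_t vpn = vaddr >> PGSHIFT;
      // The tag holds the VPN, possibly with TLB_CHECK_TRIGGERS or'd in;
      // clearing that bit lets a trigger-marked entry still hit, since
      // store_slow_path performed the trigger check before calling here.
      return vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS);
    }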
@@ -220,6 +218,35 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_
   }
 }
 
+void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool UNUSED require_alignment)
+{
+  if (actually_store) {
+    if (!matched_trigger) {
+      reg_t data = reg_from_bytes(len, bytes);
+      matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data);
+      if (matched_trigger)
+        throw *matched_trigger;
+    }
+  }
+
+  if (addr & (len - 1)) {
+    bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
+#ifndef RISCV_ENABLE_MISALIGNED
+    throw trap_store_address_misaligned(gva, addr, 0, 0);
+#else
+    if (require_alignment)
+      throw trap_store_access_fault(gva, addr, 0, 0);
+
+    reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);
+    store_slow_path_intrapage(addr, len_page0, bytes, xlate_flags, actually_store);
+    if (len_page0 != len)
+      store_slow_path_intrapage(addr + len_page0, len - len_page0, bytes + len_page0, xlate_flags, actually_store);
+#endif
+  } else {
+    store_slow_path_intrapage(addr, len, bytes, xlate_flags, actually_store);
+  }
+}
+
 tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type)
 {
   reg_t idx = (vaddr >> PGSHIFT) % TLB_ENTRIES;
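
For a concrete check of the split arithmetic in the #else branch of store_slow_path: with 4 KiB pages, an 8-byte store at 0x1ffd has only three bytes left before the page boundary, so len_page0 is 3 and the remaining five bytes go to a second intrapage store starting at 0x2000. A standalone sketch of the computation (PGSIZE = 0x1000 assumed, matching 4 KiB pages):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    int main()
    {
      using reg_t = uint64_t;
      constexpr reg_t PGSIZE = 0x1000;   // 4 KiB pages

      reg_t addr = 0x1ffd, len = 8;      // misaligned store crossing a page boundary
      reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);

      assert(len_page0 == 3);            // first chunk: bytes at 0x1ffd..0x1fff
      assert(len - len_page0 == 5);      // second chunk: bytes at 0x2000..0x2004
      return 0;
    }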