author    | Tim Newsome <tim@sifive.com> | 2022-05-25 10:58:25 -0700
committer | Tim Newsome <tim@sifive.com> | 2022-05-25 14:20:26 -0700
commit    | d392e69f96a4e6eef772c108fd10aed320b86e76 (patch)
tree      | 3692186a812d6c1f4ae2994ec15772afaeb117f5
parent    | 1c31a6126982868f1966d7d77f1ceec43bb545ed (diff)
Fix trigger store priority.
Fixes the other half of #971.
Compared to the start of this set of changes, it now takes 11% longer to
run the towers benchmark (with 22 discs).
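
The "priority" here is the order of checks in the store path: after this change a matching store trigger is evaluated before the misaligned-address handling instead of after it. A minimal standalone sketch of the resulting ordering, using hypothetical helper names (check_store_trigger, raise_misaligned, emulate_misaligned, do_aligned_store) in place of the mmu_t members shown in the diff below:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the mmu_t hooks touched by this commit.
static bool check_store_trigger(uint64_t addr) { return addr == 0x1000; }  // pretend a trigger matches this address
static void raise_misaligned(uint64_t addr)   { std::printf("misaligned trap @ %#llx\n", (unsigned long long)addr); }
static void emulate_misaligned(uint64_t addr) { std::printf("byte-wise store @ %#llx\n", (unsigned long long)addr); }
static void do_aligned_store(uint64_t addr)   { std::printf("aligned store   @ %#llx\n", (unsigned long long)addr); }

// Post-fix ordering: trigger check first, then alignment, then the store itself.
static void store(uint64_t addr, std::size_t len, bool require_alignment) {
  if (check_store_trigger(addr)) {      // 1. a debug trigger takes priority over everything else
    std::printf("trigger fires   @ %#llx\n", (unsigned long long)addr);
    return;
  }
  if (addr & (len - 1)) {               // 2. only then is misalignment considered
    if (require_alignment)
      raise_misaligned(addr);           //    e.g. store-conditional must be naturally aligned
    else
      emulate_misaligned(addr);         //    ordinary stores fall back to a byte-wise path
    return;
  }
  do_aligned_store(addr);               // 3. aligned fast path
}

int main() {
  store(0x1000, 8, false);  // trigger fires even though the address is aligned
  store(0x1001, 8, false);  // misaligned ordinary store: emulated
  store(0x1001, 8, true);   // misaligned store that requires alignment: trap
}
```

The extra trigger lookup on the fast path is presumably what accounts for the 11% slowdown mentioned above.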
-rw-r--r-- | riscv/mmu.cc | 12
-rw-r--r-- | riscv/mmu.h  | 38
2 files changed, 36 insertions, 14 deletions
diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index e43d585..6a796c4 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -184,7 +184,8 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate
   }
 }
 
-void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store)
+void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store,
+                            bool require_alignment)
 {
   reg_t paddr = translate(addr, len, STORE, xlate_flags);
 
@@ -195,6 +196,15 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_
       throw *matched_trigger;
   }
 
+  if (unlikely(addr & (len-1))) {
+    if (require_alignment) {
+      store_conditional_address_misaligned(addr);
+    } else {
+      reg_t val = reg_from_bytes(len, bytes);
+      return misaligned_store(addr, val, len, xlate_flags);
+    }
+  }
+
   if (actually_store) {
     if (auto host_addr = sim->addr_to_mem(paddr)) {
       memcpy(host_addr, bytes, len);
diff --git a/riscv/mmu.h b/riscv/mmu.h
index abfc281..1072847 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -164,14 +164,23 @@ public:
   template<class T, unsigned xlate_flags>
   inline void store_fast(reg_t addr, T val, bool actually_store=true, bool require_alignment=false) {
     const size_t size = sizeof(T);
-    if (unlikely(addr & (size-1))) {
-      if (require_alignment)
-        store_conditional_address_misaligned(addr);
-      else
-        return misaligned_store(addr, val, size, xlate_flags, actually_store);
+    const reg_t vpn = addr >> PGSHIFT;
+    if (xlate_flags == 0 &&
+        unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS)) &&
+        actually_store) {
+      if (!matched_trigger) {
+        matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, val);
+        if (matched_trigger)
+          throw *matched_trigger;
+      }
     }
-    reg_t vpn = addr >> PGSHIFT;
     if (xlate_flags == 0 && likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) {
+      if (unlikely(addr & (size-1))) {
+        if (require_alignment)
+          store_conditional_address_misaligned(addr);
+        else
+          return misaligned_store(addr, val, size, xlate_flags, actually_store);
+      }
       if (actually_store) {
         if (proc) WRITE_MEM(addr, val, size);
@@ -179,12 +188,13 @@ public:
       }
     }
     else if (xlate_flags == 0 && unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) {
+      if (unlikely(addr & (size-1))) {
+        if (require_alignment)
+          store_conditional_address_misaligned(addr);
+        else
+          return misaligned_store(addr, val, size, xlate_flags, actually_store);
+      }
       if (actually_store) {
-        if (!matched_trigger) {
-          matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, val);
-          if (matched_trigger)
-            throw *matched_trigger;
-        }
         if (proc) WRITE_MEM(addr, val, size);
         *(target_endian<T>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val);
@@ -192,7 +202,8 @@ public:
     }
     else {
       target_endian<T> target_val = to_target(val);
-      store_slow_path(addr, size, (const uint8_t*)&target_val, xlate_flags, actually_store);
+      store_slow_path(addr, size, (const uint8_t*)&target_val, xlate_flags, actually_store,
+                      require_alignment);
       if (actually_store && proc) WRITE_MEM(addr, val, size);
     }
@@ -471,7 +482,8 @@ private:
   // handle uncommon cases: TLB misses, page faults, MMIO
   tlb_entry_t fetch_slow_path(reg_t addr);
   void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool require_alignment);
-  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store);
+  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store,
+                       bool require_alignment);
   bool mmio_load(reg_t addr, size_t len, uint8_t* bytes);
   bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes);
   bool mmio_ok(reg_t addr, access_type type);
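
One detail worth noting in the store_slow_path hunk above: misaligned_store takes a register value, so the slow path first reassembles one from the byte buffer via reg_from_bytes. A rough sketch of what such a helper does, assuming a little-endian byte buffer and len of 1, 2, 4, or 8; the real helper already lives in riscv/mmu.cc and may differ in detail:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

using reg_t = std::uint64_t;

// Sketch of assembling a register value from a little-endian byte buffer,
// as store_slow_path does before delegating to misaligned_store.
// Illustration only; spike's own reg_from_bytes is authoritative.
static reg_t reg_from_bytes(std::size_t len, const std::uint8_t* bytes) {
  assert(len == 1 || len == 2 || len == 4 || len == 8);
  reg_t val = 0;
  for (std::size_t i = 0; i < len; i++)
    val |= reg_t(bytes[i]) << (8 * i);
  return val;
}

int main() {
  const std::uint8_t buf[4] = {0x78, 0x56, 0x34, 0x12};
  return reg_from_bytes(4, buf) == 0x12345678 ? 0 : 1;  // little-endian reassembly
}
```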