author    Andrew Waterman <andrew@sifive.com>  2022-10-19 17:21:11 -0700
committer Andrew Waterman <andrew@sifive.com>  2022-10-19 21:12:25 -0700
commit    905db657f56c86b5fb558e7a3a5ea04dafa46858
tree      24893efc910e55dffe589148dd183c461f81650d
parent    5746722334321b14c1937224f822cf47b1135b4e
Fix imprecise exception on LR to MMIO space
The old implementation performed the load before checking whether the memory region was valid for LR. So, for LR to MMIO space, the load's side effects would take place before the exception was raised, making the exception imprecise.
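For context, the hazard is purely one of ordering: a read from a device register can have side effects (for example, popping a FIFO entry), so an LR that targets MMIO must trap before any read is issued. The following is a minimal, self-contained sketch of the two orderings; is_mmio, device_read, MMIO_BASE, and the FIFO counter are illustrative stand-ins, not Spike's internals.

#include <cstdint>
#include <cstdio>
#include <stdexcept>

// Illustrative stand-ins for a device region and a read with side effects.
static int fifo_pops = 0;
constexpr uint64_t MMIO_BASE = 0x10000000;

static bool is_mmio(uint64_t paddr) { return paddr >= MMIO_BASE; }

static uint64_t device_read(uint64_t paddr) {
  if (is_mmio(paddr)) ++fifo_pops;   // side effect: pretend a FIFO entry was popped
  return 0;
}

// Old ordering: the read is issued first, so its side effect survives even
// though the check that follows raises a load access fault.
static uint64_t lr_imprecise(uint64_t paddr) {
  uint64_t data = device_read(paddr);
  if (is_mmio(paddr))
    throw std::runtime_error("load access fault");
  return data;
}

// New ordering: LR to MMIO is rejected before any access is performed, so the
// exception is precise and device state is untouched.
static uint64_t lr_precise(uint64_t paddr) {
  if (is_mmio(paddr))
    throw std::runtime_error("load access fault");
  return device_read(paddr);
}

int main() {
  try { lr_imprecise(MMIO_BASE); } catch (const std::exception&) {}
  std::printf("after imprecise LR: %d FIFO pop(s)\n", fifo_pops);   // prints 1

  fifo_pops = 0;
  try { lr_precise(MMIO_BASE); } catch (const std::exception&) {}
  std::printf("after precise LR:   %d FIFO pop(s)\n", fifo_pops);   // prints 0
  return 0;
}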
-rw-r--r--  riscv/insns/lr_d.h   4
-rw-r--r--  riscv/insns/lr_w.h   4
-rw-r--r--  riscv/mmu.cc         6
-rw-r--r--  riscv/mmu.h         17
4 files changed, 14 insertions, 17 deletions
diff --git a/riscv/insns/lr_d.h b/riscv/insns/lr_d.h
index 6dd8d67..214daff 100644
--- a/riscv/insns/lr_d.h
+++ b/riscv/insns/lr_d.h
@@ -1,5 +1,3 @@
require_extension('A');
require_rv64;
-auto res = MMU.load_int64(RS1, true);
-MMU.acquire_load_reservation(RS1);
-WRITE_RD(res);
+WRITE_RD(MMU.load_reserved<int64_t>(RS1));
diff --git a/riscv/insns/lr_w.h b/riscv/insns/lr_w.h
index 185be53..354590f 100644
--- a/riscv/insns/lr_w.h
+++ b/riscv/insns/lr_w.h
@@ -1,4 +1,2 @@
require_extension('A');
-auto res = MMU.load_int32(RS1, true);
-MMU.acquire_load_reservation(RS1);
-WRITE_RD(res);
+WRITE_RD(MMU.load_reserved<int32_t>(RS1));
diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index de82a77..0c858ae 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -190,7 +190,11 @@ void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags)
tracer.trace(paddr, len, LOAD);
else if (xlate_flags == 0)
refill_tlb(addr, paddr, host_addr, LOAD);
- } else if (!mmio_load(paddr, len, bytes)) {
+
+ if (xlate_flags & RISCV_XLATE_LR) {
+ load_reservation_address = paddr;
+ }
+ } else if ((xlate_flags & RISCV_XLATE_LR) || !mmio_load(paddr, len, bytes)) {
throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0);
}
}
diff --git a/riscv/mmu.h b/riscv/mmu.h
index 70a644d..bbe473b 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -49,8 +49,9 @@ public:
mmu_t(simif_t* sim, processor_t* proc);
~mmu_t();
-#define RISCV_XLATE_VIRT (1U << 0)
+#define RISCV_XLATE_VIRT      (1U << 0)
#define RISCV_XLATE_VIRT_HLVX (1U << 1)
+#define RISCV_XLATE_LR        (1U << 2)
#ifndef RISCV_ENABLE_COMMITLOG
# define READ_MEM(addr, size) ((void)(addr), (void)(size))
@@ -78,6 +79,11 @@ public:
return from_target(res);
}
+ template<typename T>
+ T load_reserved(reg_t addr) {
+ return load<T>(addr, true, RISCV_XLATE_LR);
+ }
+
// template for functions that load an aligned value from memory
#define load_func(type, prefix, xlate_flags) \
type##_t ALWAYS_INLINE prefix##_##type(reg_t addr, bool require_alignment = false) { return load<type##_t>(addr, require_alignment, xlate_flags); }
@@ -222,15 +228,6 @@ public:
load_reservation_address = (reg_t)-1;
}
- inline void acquire_load_reservation(reg_t vaddr)
- {
- reg_t paddr = translate(vaddr, 1, LOAD, 0);
- if (auto host_addr = sim->addr_to_mem(paddr))
- load_reservation_address = refill_tlb(vaddr, paddr, host_addr, LOAD).target_offset + vaddr;
- else
- throw trap_load_access_fault((proc) ? proc->state.v : false, vaddr, 0, 0); // disallow LR to I/O space
- }
-
inline bool check_load_reservation(reg_t vaddr, size_t size)
{
if (vaddr & (size-1)) {
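For completeness, the reservation recorded in the mmu.cc hunk above (now keyed directly by physical address) is what check_load_reservation consults on the SC side. The following toy model of that LR/SC bookkeeping is written from scratch for illustration and is not Spike's code; in particular, a real simulator would typically raise a store/AMO address-misaligned trap where this sketch simply fails the SC.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <optional>

// Toy reservation bookkeeping for LR/SC; illustrative only.
struct reservation_t {
  std::optional<uint64_t> paddr;   // physical address reserved by the last LR

  void acquire(uint64_t p) { paddr = p; }   // successful LR
  void yield() { paddr.reset(); }           // cleared by SC, traps, etc.

  // SC succeeds only if aligned and the reservation still covers this address.
  bool check(uint64_t p, size_t size) const {
    if (p & (size - 1))
      return false;                         // a real simulator may trap here instead
    return paddr && *paddr == p;
  }
};

int main() {
  reservation_t res;
  res.acquire(0x80001000);                  // LR.W on ordinary memory
  assert(res.check(0x80001000, 4));         // matching SC.W succeeds
  assert(!res.check(0x80001004, 4));        // different address fails
  res.yield();
  assert(!res.check(0x80001000, 4));        // reservation already consumed
  return 0;
}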