path: root/riscv/mmu.h
author    Andrew Waterman <andrew@sifive.com>  2021-07-21 17:57:01 -0700
committer Andrew Waterman <andrew@sifive.com>  2021-07-21 18:00:13 -0700
commit    cea9eb4a138590b9204c593e9ee61a6e1a6c6753 (patch)
tree      6c71bde6f02ffef2ff79ba6cac5035f67e871ae7 /riscv/mmu.h
parent    be5d4caa0fc66bdd614f9407c003417725e7cee8 (diff)
Simplify (and possibly fix) handling of HLV/HSV TLB accesses
The previous scheme flushed the TLB before and after HLV/HSV. I think this was slightly wrong in the case of a debug trigger match: because the TLB gets refilled before the trigger exception gets thrown, we might not have reached the second TLB flush, so the entry could linger. Instead of flushing, simply don't access the TLB and don't refill the TLB for these instructions. Other than the trigger exception case, the effect is the same: we'll perform a full table walk and we won't cache the result.
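For context, a minimal sketch of the pattern this commit adopts (not the actual mmu.h code): the TLB fast path is gated on xlate_flags == 0, so an HLV/HSV-style access neither hits nor refills the TLB and no flush is needed before or after it. The names toy_mmu, tlb_tag, tlb_host, and slow_path_load below are illustrative assumptions, not Spike's identifiers.

    #include <cstdint>
    #include <cstddef>

    static const size_t TLB_ENTRIES = 256;
    static const int PGSHIFT = 12;

    struct toy_mmu {
      uint64_t tlb_tag[TLB_ENTRIES];   // cached VPN per TLB entry
      char*    tlb_host[TLB_ENTRIES];  // host base pointer for the mapped page

      // Slow-path stand-in: a real implementation would do a full page-table
      // walk here and refill the TLB only when xlate_flags == 0.
      uint64_t slow_path_load(uint64_t addr, unsigned xlate_flags) {
        (void)addr; (void)xlate_flags;
        return 0;
      }

      uint64_t load_u64(uint64_t addr, unsigned xlate_flags) {
        uint64_t vpn = addr >> PGSHIFT;
        // Fast path only for ordinary translation (xlate_flags == 0).
        // Accesses with non-zero xlate_flags skip the TLB entirely, so no
        // stale entry can be created and no flushing is required.
        if (xlate_flags == 0 && tlb_tag[vpn % TLB_ENTRIES] == vpn) {
          uint64_t off = addr & ((uint64_t(1) << PGSHIFT) - 1);
          return *(uint64_t*)(tlb_host[vpn % TLB_ENTRIES] + off);
        }
        return slow_path_load(addr, xlate_flags);
      }
    };

The diff below applies the same idea to Spike's load_func and store_func macros: the old flush_tlb() calls are dropped, and each fast-path check gains an (xlate_flags) == 0 guard.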
Diffstat (limited to 'riscv/mmu.h')
-rw-r--r--  riscv/mmu.h  16
1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/riscv/mmu.h b/riscv/mmu.h
index 503bb1d..74e162b 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -98,19 +98,17 @@ public:
// template for functions that load an aligned value from memory
#define load_func(type, prefix, xlate_flags) \
inline type##_t prefix##_##type(reg_t addr, bool require_alignment = false) { \
- if ((xlate_flags) != 0) \
- flush_tlb(); \
if (unlikely(addr & (sizeof(type##_t)-1))) { \
if (require_alignment) load_reserved_address_misaligned(addr); \
else return misaligned_load(addr, sizeof(type##_t), xlate_flags); \
} \
reg_t vpn = addr >> PGSHIFT; \
size_t size = sizeof(type##_t); \
- if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) { \
+ if ((xlate_flags) == 0 && likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) { \
if (proc) READ_MEM(addr, size); \
return from_target(*(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
} \
- if (unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
+ if ((xlate_flags) == 0 && unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
type##_t data = from_target(*(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
if (!matched_trigger) { \
matched_trigger = trigger_exception(OPERATION_LOAD, addr, data); \
@@ -123,8 +121,6 @@ public:
target_endian<type##_t> res; \
load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res, (xlate_flags)); \
if (proc) READ_MEM(addr, size); \
- if ((xlate_flags) != 0) \
- flush_tlb(); \
return from_target(res); \
}
@@ -164,17 +160,15 @@ public:
// template for functions that store an aligned value to memory
#define store_func(type, prefix, xlate_flags) \
void prefix##_##type(reg_t addr, type##_t val) { \
- if ((xlate_flags) != 0) \
- flush_tlb(); \
if (unlikely(addr & (sizeof(type##_t)-1))) \
return misaligned_store(addr, val, sizeof(type##_t), xlate_flags); \
reg_t vpn = addr >> PGSHIFT; \
size_t size = sizeof(type##_t); \
- if (likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \
+ if ((xlate_flags) == 0 && likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \
if (proc) WRITE_MEM(addr, val, size); \
*(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
} \
- else if (unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
+ else if ((xlate_flags) == 0 && unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
if (!matched_trigger) { \
matched_trigger = trigger_exception(OPERATION_STORE, addr, val); \
if (matched_trigger) \
@@ -188,8 +182,6 @@ public:
store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags)); \
if (proc) WRITE_MEM(addr, val, size); \
} \
- if ((xlate_flags) != 0) \
- flush_tlb(); \
}
// template for functions that perform an atomic memory operation