author     Andrew Waterman <andrew@sifive.com>    2022-10-13 13:43:27 -0700
committer  GitHub <noreply@github.com>            2022-10-13 13:43:27 -0700
commit     e2e66015af4b5fce4bc958e70398f1fb7af7bcd9 (patch)
tree       ec4f5577f8b20f83b134d4102615e33f652e72e3 /riscv
parent     86d9fe49eda1fff863a43e682015216a25cc72f3 (diff)
parent     7b8114f707a7b2de9fd2d393b9d019180de83025 (diff)
Merge pull request #1107 from riscv-software-src/simplify-ld-st
Simplify handling of load/store/fetch slow-path cases; fix two minor trigger bugs
Diffstat (limited to 'riscv')
-rw-r--r--  riscv/execute.cc  |   9
-rw-r--r--  riscv/mmu.cc      | 123
-rw-r--r--  riscv/mmu.h       | 170
3 files changed, 123 insertions(+), 179 deletions(-)
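The refactor below funnels the scattered trigger checks into a single mmu_t::check_triggers helper, called once before each memory access (address-only match) and once after (address plus loaded data). A minimal self-contained sketch of that call shape, with match_memory_access as a hypothetical stand-in for spike's proc->TM.memory_access_match:

// Standalone sketch of the before/after trigger-check pattern.
// match_memory_access() is a hypothetical stand-in; only the call
// shape mirrors the patch, not spike's real trigger module.
#include <cstdint>
#include <stdexcept>

enum match_t { MATCH_NONE, MATCH_FIRE_BEFORE, MATCH_FIRE_AFTER };

static match_t match_memory_access(uint64_t addr, bool has_data, uint64_t data) {
  (void)addr; (void)has_data; (void)data;
  return MATCH_NONE;  // no triggers configured in this sketch
}

static uint64_t load_with_triggers(uint64_t addr) {
  // Check 1: address only, before the access is performed.
  if (match_memory_access(addr, false, 0) == MATCH_FIRE_BEFORE)
    throw std::runtime_error("trigger fires before the access");

  uint64_t data = 42;  // stand-in for the actual memory read

  // Check 2: address plus the loaded data, after the access. A
  // FIRE_AFTER match is deferred in the real code (see check_triggers
  // in the mmu.cc hunk below); here we just note where it would land.
  if (match_memory_access(addr, true, data) == MATCH_FIRE_AFTER) {
    /* record the match; it is taken on the next instruction */
  }

  return data;
}

int main() { return load_with_triggers(0x1000) == 42 ? 0 : 1; }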
diff --git a/riscv/execute.cc b/riscv/execute.cc
index 5d24ce8..f0bb946 100644
--- a/riscv/execute.cc
+++ b/riscv/execute.cc
@@ -309,15 +309,6 @@ void processor_t::step(size_t n)
     catch (triggers::matched_t& t)
     {
       if (mmu->matched_trigger) {
-        // This exception came from the MMU. That means the instruction hasn't
-        // fully executed yet. We start it again, but this time it won't throw
-        // an exception because matched_trigger is already set. (All memory
-        // instructions are idempotent so restarting is safe.)
-
-        insn_fetch_t fetch = mmu->load_insn(pc);
-        pc = execute_insn(this, pc, fetch);
-        advance_pc();
-
         delete mmu->matched_trigger;
         mmu->matched_trigger = NULL;
       }
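The block deleted above re-executed the faulting instruction from step() whenever a trigger exception came from the MMU. After this change, a "fire after" trigger is instead recorded and taken at the next instruction fetch. A simplified sketch of that deferral, with stand-ins for the icache state and triggers::matched_t (the struct here is hypothetical):

#include <cstdint>
#include <cstdio>
#include <optional>

struct matched_t { uint64_t address; };           // stand-in for triggers::matched_t

static std::optional<matched_t> matched_trigger;  // stand-in for mmu->matched_trigger
static bool icache_valid = false;

static void flush_icache() { icache_valid = false; }

// Called on a MATCH_FIRE_AFTER: the exception must be raised on the
// *next* instruction, so force the next fetch down the slow path.
static void on_fire_after(uint64_t addr) {
  flush_icache();
  matched_trigger = matched_t{addr};
}

// Stand-in for refill_icache(): the slow fetch path notices the pending match.
static void refill_icache() {
  if (matched_trigger)
    throw matched_t(*matched_trigger);
  icache_valid = true;
}

int main() {
  on_fire_after(0x80000000);
  try { refill_icache(); }
  catch (const matched_t& t) {
    std::printf("trigger taken at %#llx\n", (unsigned long long)t.address);
  }
}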
diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index 38a7241..fdad05f 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -76,16 +76,26 @@ reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_f
 tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
 {
-  reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, 0);
-  if (auto host_addr = sim->addr_to_mem(paddr)) {
-    return refill_tlb(vaddr, paddr, host_addr, FETCH);
+  check_triggers(triggers::OPERATION_EXECUTE, vaddr, false);
+
+  tlb_entry_t result;
+  reg_t vpn = vaddr >> PGSHIFT;
+  if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) {
+    reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, 0);
+    if (auto host_addr = sim->addr_to_mem(paddr)) {
+      result = refill_tlb(vaddr, paddr, host_addr, FETCH);
+    } else {
+      if (!mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp))
+        throw trap_instruction_access_fault(proc->state.v, vaddr, 0, 0);
+      result = {(char*)&fetch_temp - vaddr, paddr - vaddr};
+    }
   } else {
-    if (!mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp))
-      throw trap_instruction_access_fault(proc->state.v, vaddr, 0, 0);
-    tlb_entry_t entry = {(char*)&fetch_temp - vaddr, paddr - vaddr};
-    return entry;
+    result = tlb_data[vpn % TLB_ENTRIES];
   }
+
+  check_triggers(triggers::OPERATION_EXECUTE, vaddr, true, from_le(*(const uint16_t*)(result.host_offset + vaddr)));
+
+  return result;
 }
 
 reg_t reg_from_bytes(size_t len, const uint8_t* bytes)
@@ -139,12 +149,37 @@ bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes)
   return sim->mmio_store(addr, len, bytes);
 }
 
-void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags)
+void mmu_t::check_triggers(triggers::operation_t operation, reg_t address, bool has_data, reg_t data)
+{
+  if (matched_trigger || !proc)
+    return;
+
+  triggers::action_t action;
+  auto match = proc->TM.memory_access_match(&action, operation, address, has_data, data);
+
+  switch (match) {
+    case triggers::MATCH_NONE:
+      return;
+
+    case triggers::MATCH_FIRE_BEFORE:
+      throw triggers::matched_t(operation, address, data, action);
+
+    case triggers::MATCH_FIRE_AFTER:
+      // We want to take this exception on the next instruction. We check
+      // whether to do so in the I$ refill path, so flush the I$.
+      flush_icache();
+      matched_trigger = new triggers::matched_t(operation, address, data, action);
+      return;
+  }
+}
+
+void mmu_t::load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags)
 {
-  if (!matched_trigger) {
-    matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false);
-    if (matched_trigger)
-      throw *matched_trigger;
+  reg_t vpn = addr >> PGSHIFT;
+  if (xlate_flags == 0 && vpn == (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
+    auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr;
+    memcpy(bytes, host_addr, len);
+    return;
   }
 
   reg_t paddr = translate(addr, len, LOAD, xlate_flags);
@@ -158,24 +193,39 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate
   } else if (!mmio_load(paddr, len, bytes)) {
     throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0);
   }
+}
 
-  if (!matched_trigger) {
-    reg_t data = reg_from_bytes(len, bytes);
-    matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, true, data);
-    if (matched_trigger)
-      throw *matched_trigger;
+void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool UNUSED require_alignment)
+{
+  check_triggers(triggers::OPERATION_LOAD, addr, false);
+
+  if ((addr & (len - 1)) == 0) {
+    load_slow_path_intrapage(addr, len, bytes, xlate_flags);
+  } else {
+    bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
+#ifndef RISCV_ENABLE_MISALIGNED
+    throw trap_load_address_misaligned(gva, addr, 0, 0);
+#else
+    if (require_alignment)
+      throw trap_load_access_fault(gva, addr, 0, 0);
+
+    reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);
+    load_slow_path_intrapage(addr, len_page0, bytes, xlate_flags);
+    if (len_page0 != len)
+      load_slow_path_intrapage(addr + len_page0, len - len_page0, bytes + len_page0, xlate_flags);
+#endif
   }
+
+  check_triggers(triggers::OPERATION_LOAD, addr, true, reg_from_bytes(len, bytes));
 }
 
-void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store)
+void mmu_t::store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store)
 {
-  if (actually_store) {
-    if (!matched_trigger) {
-      reg_t data = reg_from_bytes(len, bytes);
-      matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, data);
-      if (matched_trigger)
-        throw *matched_trigger;
-    }
+  reg_t vpn = addr >> PGSHIFT;
+  if (xlate_flags == 0 && vpn == (tlb_store_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS)) {
+    auto host_addr = tlb_data[vpn % TLB_ENTRIES].host_offset + addr;
+    memcpy(host_addr, bytes, len);
+    return;
   }
 
   reg_t paddr = translate(addr, len, STORE, xlate_flags);
@@ -193,6 +243,29 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_
   }
 }
 
+void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool UNUSED require_alignment)
+{
+  if (actually_store)
+    check_triggers(triggers::OPERATION_STORE, addr, true, reg_from_bytes(len, bytes));
+
+  if (addr & (len - 1)) {
+    bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
+#ifndef RISCV_ENABLE_MISALIGNED
+    throw trap_store_address_misaligned(gva, addr, 0, 0);
+#else
+    if (require_alignment)
+      throw trap_store_access_fault(gva, addr, 0, 0);
+
+    reg_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);
+    store_slow_path_intrapage(addr, len_page0, bytes, xlate_flags, actually_store);
+    if (len_page0 != len)
+      store_slow_path_intrapage(addr + len_page0, len - len_page0, bytes + len_page0, xlate_flags, actually_store);
+#endif
+  } else {
+    store_slow_path_intrapage(addr, len, bytes, xlate_flags, actually_store);
+  }
+}
+
 tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type)
 {
   reg_t idx = (vaddr >> PGSHIFT) % TLB_ENTRIES;
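The new slow paths above subsume the old byte-by-byte misaligned_load/misaligned_store helpers (removed from mmu.h below): a misaligned access that crosses a page boundary is split into at most two intrapage pieces, the first sized to reach the end of the page. A standalone sketch of just that split arithmetic, with intrapage_access standing in for the *_slow_path_intrapage functions:

#include <algorithm>
#include <cstdint>
#include <cstdio>

constexpr uint64_t PGSIZE = 4096;  // spike's 4 KiB pages

// Stand-in for load/store_slow_path_intrapage: handles one piece that is
// guaranteed not to cross a page boundary.
static void intrapage_access(uint64_t addr, uint64_t len) {
  std::printf("access [%#llx, %#llx)\n",
              (unsigned long long)addr, (unsigned long long)(addr + len));
}

static void misaligned_access(uint64_t addr, uint64_t len) {
  // First piece runs at most to the end of the current page.
  uint64_t len_page0 = std::min(len, PGSIZE - addr % PGSIZE);
  intrapage_access(addr, len_page0);
  if (len_page0 != len)  // second piece starts on the next page
    intrapage_access(addr + len_page0, len - len_page0);
}

int main() {
  misaligned_access(0x1ffe, 8);  // splits at 0x2000: [0x1ffe,0x2000) + [0x2000,0x2006)
}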
diff --git a/riscv/mmu.h b/riscv/mmu.h
index 8b9ff9b..01e74ef 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -52,44 +52,6 @@ public:
 #define RISCV_XLATE_VIRT (1U << 0)
 #define RISCV_XLATE_VIRT_HLVX (1U << 1)
 
-  inline reg_t misaligned_load(reg_t addr, size_t UNUSED size, uint32_t xlate_flags)
-  {
-#ifdef RISCV_ENABLE_MISALIGNED
-    reg_t res = 0;
-    for (size_t i = 0; i < size; i++) {
-      const reg_t byteaddr = addr + (target_big_endian? size-1-i : i);
-      const reg_t bytedata
-        = (RISCV_XLATE_VIRT_HLVX & xlate_flags) ? guest_load_x_uint8(byteaddr)
-        : (RISCV_XLATE_VIRT & xlate_flags) ? guest_load_uint8(byteaddr)
-        : load_uint8(byteaddr)
-        ;
-      res += bytedata << (i * 8);
-    }
-    return res;
-#else
-    bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
-    throw trap_load_address_misaligned(gva, addr, 0, 0);
-#endif
-  }
-
-  inline void misaligned_store(reg_t addr, reg_t UNUSED data, size_t UNUSED size, uint32_t xlate_flags, bool UNUSED actually_store=true)
-  {
-#ifdef RISCV_ENABLE_MISALIGNED
-    for (size_t i = 0; i < size; i++) {
-      const reg_t byteaddr = addr + (target_big_endian? size-1-i : i);
-      const reg_t bytedata = data >> (i * 8);
-      if (RISCV_XLATE_VIRT & xlate_flags) {
-        guest_store_uint8(byteaddr, bytedata, actually_store);
-      } else {
-        store_uint8(byteaddr, bytedata, actually_store);
-      }
-    }
-#else
-    bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags);
-    throw trap_store_address_misaligned(gva, addr, 0, 0);
-#endif
-  }
-
 #ifndef RISCV_ENABLE_COMMITLOG
 # define READ_MEM(addr, size) ((void)(addr), (void)(size))
 #else
@@ -100,35 +62,19 @@ public:
   // template for functions that load an aligned value from memory
 #define load_func(type, prefix, xlate_flags) \
   type##_t ALWAYS_INLINE prefix##_##type(reg_t addr, bool require_alignment = false) { \
-    if (unlikely(addr & (sizeof(type##_t)-1))) { \
-      if (!matched_trigger) { \
-        matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, false); \
-        if (matched_trigger) \
-          throw *matched_trigger; \
-      } \
-      if (require_alignment) load_reserved_address_misaligned(addr); \
-      else return misaligned_load(addr, sizeof(type##_t), xlate_flags); \
-    } \
     reg_t vpn = addr >> PGSHIFT; \
     size_t size = sizeof(type##_t); \
-    if ((xlate_flags) == 0 && likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) { \
+    bool aligned = (addr & (size - 1)) == 0; \
+    bool tlb_hit = tlb_load_tag[vpn % TLB_ENTRIES] == vpn; \
+    if (likely((xlate_flags) == 0 && aligned && tlb_hit)) { \
       if (proc) READ_MEM(addr, size); \
       return from_target(*(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
-    } \
-    if ((xlate_flags) == 0 && unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
-      type##_t data = from_target(*(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \
-      if (!matched_trigger) { \
-        matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, true, data); \
-        if (matched_trigger) \
-          throw *matched_trigger; \
-      } \
+    } else { \
+      target_endian<type##_t> res; \
+      load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res, (xlate_flags), require_alignment); \
       if (proc) READ_MEM(addr, size); \
-      return data; \
+      return from_target(res); \
     } \
-    target_endian<type##_t> res; \
-    load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res, (xlate_flags)); \
-    if (proc) READ_MEM(addr, size); \
-    return from_target(res); \
   }
 
   // load value from memory at aligned address; zero extend to register width
@@ -142,7 +88,6 @@ public:
   load_func(uint16, guest_load, RISCV_XLATE_VIRT)
   load_func(uint32, guest_load, RISCV_XLATE_VIRT)
   load_func(uint64, guest_load, RISCV_XLATE_VIRT)
-  load_func(uint8, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX) // only for use by misaligned HLVX
   load_func(uint16, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX)
   load_func(uint32, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX)
@@ -168,39 +113,18 @@ public:
   // template for functions that store an aligned value to memory
 #define store_func(type, prefix, xlate_flags) \
   void ALWAYS_INLINE prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \
-    if (unlikely(addr & (sizeof(type##_t)-1))) { \
-      if (actually_store) { \
-        if (!matched_trigger) { \
-          matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, val); \
-          if (matched_trigger) \
-            throw *matched_trigger; \
-        } \
-      } \
-      if (require_alignment) store_conditional_address_misaligned(addr); \
-      else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags, actually_store); \
-    } \
     reg_t vpn = addr >> PGSHIFT; \
     size_t size = sizeof(type##_t); \
-    if ((xlate_flags) == 0 && likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \
-      if (actually_store) { \
-        if (proc) WRITE_MEM(addr, val, size); \
-        *(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
-      } \
-    } \
-    else if ((xlate_flags) == 0 && unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
+    bool aligned = (addr & (size - 1)) == 0; \
+    bool tlb_hit = tlb_store_tag[vpn % TLB_ENTRIES] == vpn; \
+    if ((xlate_flags) == 0 && likely(aligned && tlb_hit)) { \
       if (actually_store) { \
-        if (!matched_trigger) { \
-          matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, true, val); \
-          if (matched_trigger) \
-            throw *matched_trigger; \
-        } \
         if (proc) WRITE_MEM(addr, val, size); \
         *(target_endian<type##_t>*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \
       } \
-    } \
-    else { \
+    } else { \
       target_endian<type##_t> target_val = to_target(val); \
-      store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags), actually_store); \
+      store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags), actually_store, require_alignment); \
       if (actually_store && proc) WRITE_MEM(addr, val, size); \
     } \
   }
@@ -300,30 +224,12 @@ public:
       throw trap_load_access_fault((proc) ? proc->state.v : false, vaddr, 0, 0); // disallow LR to I/O space
   }
 
-  inline void load_reserved_address_misaligned(reg_t vaddr)
-  {
-    bool gva = proc ? proc->state.v : false;
-#ifdef RISCV_ENABLE_MISALIGNED
-    throw trap_load_access_fault(gva, vaddr, 0, 0);
-#else
-    throw trap_load_address_misaligned(gva, vaddr, 0, 0);
-#endif
-  }
-
-  inline void store_conditional_address_misaligned(reg_t vaddr)
-  {
-    bool gva = proc ? proc->state.v : false;
-#ifdef RISCV_ENABLE_MISALIGNED
-    throw trap_store_access_fault(gva, vaddr, 0, 0);
-#else
-    throw trap_store_address_misaligned(gva, vaddr, 0, 0);
-#endif
-  }
-
   inline bool check_load_reservation(reg_t vaddr, size_t size)
   {
-    if (vaddr & (size-1))
-      store_conditional_address_misaligned(vaddr);
+    if (vaddr & (size-1)) {
+      // Raise either access fault or misaligned exception
+      store_slow_path(vaddr, size, nullptr, 0, false, true);
+    }
 
     reg_t paddr = translate(vaddr, 1, STORE, 0);
     if (auto host_addr = sim->addr_to_mem(paddr))
@@ -341,6 +247,9 @@ public:
   inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry)
   {
+    if (matched_trigger)
+      throw *matched_trigger;
+
     auto tlb_entry = translate_insn_addr(addr);
     insn_bits_t insn = from_le(*(uint16_t*)(tlb_entry.host_offset + addr));
     int length = insn_length(insn);
@@ -471,11 +380,14 @@ private:
   // handle uncommon cases: TLB misses, page faults, MMIO
   tlb_entry_t fetch_slow_path(reg_t addr);
-  void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
-  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store);
+  void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags, bool require_alignment);
+  void load_slow_path_intrapage(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
+  void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store, bool require_alignment);
+  void store_slow_path_intrapage(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store);
   bool mmio_load(reg_t addr, size_t len, uint8_t* bytes);
   bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes);
   bool mmio_ok(reg_t addr, access_type type);
+  void check_triggers(triggers::operation_t operation, reg_t address, bool has_data, reg_t data = 0);
   reg_t translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags);
 
   // ITLB lookup
@@ -483,45 +395,13 @@ private:
     reg_t vpn = addr >> PGSHIFT;
     if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn))
       return tlb_data[vpn % TLB_ENTRIES];
-    triggers::action_t action;
-    auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, false);
-    if (match != triggers::MATCH_NONE) {
-      throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, 0, action);
-    }
-    tlb_entry_t result;
-    if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) {
-      result = fetch_slow_path(addr);
-    } else {
-      result = tlb_data[vpn % TLB_ENTRIES];
-    }
-    target_endian<uint16_t>* ptr = (target_endian<uint16_t>*)(result.host_offset + addr);
-    match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, true, from_target(*ptr));
-    if (match != triggers::MATCH_NONE) {
-      throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action);
-    }
-    return result;
+    return fetch_slow_path(addr);
   }
 
   inline const uint16_t* translate_insn_addr_to_host(reg_t addr) {
     return (uint16_t*)(translate_insn_addr(addr).host_offset + addr);
   }
 
-  inline triggers::matched_t *trigger_exception(triggers::operation_t operation,
-    reg_t address, bool has_data, reg_t data=0)
-  {
-    if (!proc) {
-      return NULL;
-    }
-    triggers::action_t action;
-    auto match = proc->TM.memory_access_match(&action, operation, address, has_data, data);
-    if (match == triggers::MATCH_NONE)
-      return NULL;
-    if (match == triggers::MATCH_FIRE_BEFORE) {
-      throw triggers::matched_t(operation, address, data, action);
-    }
-    return new triggers::matched_t(operation, address, data, action);
-  }
-
   reg_t pmp_homogeneous(reg_t addr, reg_t len);
   bool pmp_ok(reg_t addr, reg_t len, access_type type, reg_t mode);
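One convention underpins the whole fast-path/slow-path split above: a TLB tag equal to the VPN is a plain hit, while a tag of vpn | TLB_CHECK_TRIGGERS keeps the translation valid but forces the access through the slow path so check_triggers runs. A standalone sketch of the two tag tests (the constant values here are assumed for illustration, not copied from spike's headers):

#include <cstddef>
#include <cstdint>

constexpr size_t TLB_ENTRIES = 256;                         // assumed size
constexpr uint64_t TLB_CHECK_TRIGGERS = UINT64_C(1) << 63;  // assumed tag bit
constexpr int PGSHIFT = 12;

static uint64_t tlb_load_tag[TLB_ENTRIES];

// Fast path: only a tag exactly equal to the VPN may skip trigger checks.
static bool fast_path_hit(uint64_t addr) {
  uint64_t vpn = addr >> PGSHIFT;
  return tlb_load_tag[vpn % TLB_ENTRIES] == vpn;
}

// Slow path: the entry is still usable if only the trigger bit differs,
// mirroring load_slow_path_intrapage's (tag & ~TLB_CHECK_TRIGGERS) == vpn test.
static bool slow_path_hit(uint64_t addr) {
  uint64_t vpn = addr >> PGSHIFT;
  return (tlb_load_tag[vpn % TLB_ENTRIES] & ~TLB_CHECK_TRIGGERS) == vpn;
}

int main() {
  uint64_t addr = 0x1234000;
  tlb_load_tag[(addr >> PGSHIFT) % TLB_ENTRIES] = (addr >> PGSHIFT) | TLB_CHECK_TRIGGERS;
  // Fast path misses (trigger bit set), but the slow path still reuses the entry.
  return (!fast_path_hit(addr) && slow_path_hit(addr)) ? 0 : 1;
}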