author    Andrew Waterman <andrew@sifive.com>  2017-04-30 17:37:06 -0700
committer Andrew Waterman <andrew@sifive.com>  2017-04-30 18:45:41 -0700
commit    1d2892407fed141d20607ae9c49e50673e2c5c11 (patch)
tree      1500bae57974bf1ab1458cf515c1cd021355d44a
parent    115297efff43025c66d330db6fc1bc6eda37f905 (diff)
Store both host & target address in soft TLB
-rw-r--r--  riscv/mmu.cc  33
-rw-r--r--  riscv/mmu.h   49
-rw-r--r--  riscv/sim.h    3
3 files changed, 47 insertions(+), 38 deletions(-)
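The commit subject summarizes the change: each soft-TLB entry now carries two offsets, one recovering the host pointer and one recovering the target physical address, so a hit no longer needs mem_to_addr(). Below is a minimal, self-contained sketch of that offset trick; the page buffer and addresses are hypothetical stand-ins, not the simulator's real memory map.

#include <cstdint>
#include <cstdio>
#include <cstring>

using reg_t = uint64_t;

// Mirrors the struct added to riscv/mmu.h: both fields are offsets relative
// to the virtual address, so "offset + vaddr" yields either the host pointer
// or the target physical address for any access within the page.
struct tlb_entry_t {
  char* host_offset;    // host pointer minus virtual address
  reg_t target_offset;  // target physical address minus virtual address
};

int main() {
  constexpr reg_t PGSIZE = 4096;
  static char page[PGSIZE];          // stand-in for one page of simulated DRAM
  reg_t vpage = 0x00007ffff000;      // hypothetical virtual page base
  reg_t ppage = 0x000080004000;      // hypothetical physical page base

  // What refill_tlb() computes on a miss:
  tlb_entry_t entry = { page - vpage, ppage - vpage };

  // What the fast path computes on a hit:
  reg_t vaddr = vpage + 0x123;
  char* host  = entry.host_offset + vaddr;    // pointer for direct loads/stores
  reg_t paddr = entry.target_offset + vaddr;  // physical address for tracing

  std::memcpy(host, "ok", 3);
  std::printf("paddr = 0x%llx, data = %s\n", (unsigned long long)paddr, host);
  return 0;
}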
diff --git a/riscv/mmu.cc b/riscv/mmu.cc
index 8df38e5..f0adb22 100644
--- a/riscv/mmu.cc
+++ b/riscv/mmu.cc
@@ -47,17 +47,17 @@ reg_t mmu_t::translate(reg_t addr, access_type type)
return walk(addr, type, mode) | (addr & (PGSIZE-1));
}
-const uint16_t* mmu_t::fetch_slow_path(reg_t vaddr)
+tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr)
{
reg_t paddr = translate(vaddr, FETCH);
- if (sim->addr_is_mem(paddr)) {
- refill_tlb(vaddr, paddr, FETCH);
- return (const uint16_t*)sim->addr_to_mem(paddr);
+ if (auto host_addr = sim->addr_to_mem(paddr)) {
+ return refill_tlb(vaddr, paddr, host_addr, FETCH);
} else {
if (!sim->mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp))
throw trap_instruction_access_fault(vaddr);
- return &fetch_temp;
+ tlb_entry_t entry = {(char*)&fetch_temp - vaddr, paddr - vaddr};
+ return entry;
}
}
@@ -91,12 +91,12 @@ void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes)
{
reg_t paddr = translate(addr, LOAD);
- if (sim->addr_is_mem(paddr)) {
- memcpy(bytes, sim->addr_to_mem(paddr), len);
+ if (auto host_addr = sim->addr_to_mem(paddr)) {
+ memcpy(bytes, host_addr, len);
if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD))
tracer.trace(paddr, len, LOAD);
else
- refill_tlb(addr, paddr, LOAD);
+ refill_tlb(addr, paddr, host_addr, LOAD);
} else if (!sim->mmio_load(paddr, len, bytes)) {
throw trap_load_access_fault(addr);
}
@@ -120,18 +120,18 @@ void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes)
throw *matched_trigger;
}
- if (sim->addr_is_mem(paddr)) {
- memcpy(sim->addr_to_mem(paddr), bytes, len);
+ if (auto host_addr = sim->addr_to_mem(paddr)) {
+ memcpy(host_addr, bytes, len);
if (tracer.interested_in_range(paddr, paddr + PGSIZE, STORE))
tracer.trace(paddr, len, STORE);
else
- refill_tlb(addr, paddr, STORE);
+ refill_tlb(addr, paddr, host_addr, STORE);
} else if (!sim->mmio_store(paddr, len, bytes)) {
throw trap_store_access_fault(addr);
}
}
-void mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, access_type type)
+tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type)
{
reg_t idx = (vaddr >> PGSHIFT) % TLB_ENTRIES;
reg_t expected_tag = vaddr >> PGSHIFT;
@@ -152,7 +152,9 @@ void mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, access_type type)
else if (type == STORE) tlb_store_tag[idx] = expected_tag;
else tlb_load_tag[idx] = expected_tag;
- tlb_data[idx] = sim->addr_to_mem(paddr) - vaddr;
+ tlb_entry_t entry = {host_addr - vaddr, paddr - vaddr};
+ tlb_data[idx] = entry;
+ return entry;
}
reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode)
@@ -178,11 +180,10 @@ reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode)
reg_t idx = (addr >> (PGSHIFT + ptshift)) & ((1 << vm.idxbits) - 1);
// check that physical address of PTE is legal
- reg_t pte_addr = base + idx * vm.ptesize;
- if (!sim->addr_is_mem(pte_addr))
+ auto ppte = sim->addr_to_mem(base + idx * vm.ptesize);
+ if (!ppte)
throw trap_load_access_fault(addr);
- void* ppte = sim->addr_to_mem(pte_addr);
reg_t pte = vm.ptesize == 4 ? *(uint32_t*)ppte : *(uint64_t*)ppte;
reg_t ppn = pte >> PTE_PPN_SHIFT;
diff --git a/riscv/mmu.h b/riscv/mmu.h
index a8d9675..f70a969 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -30,6 +30,11 @@ struct icache_entry_t {
insn_fetch_t data;
};
+struct tlb_entry_t {
+ char* host_offset;
+ reg_t target_offset;
+};
+
class trigger_matched_t
{
public:
@@ -80,9 +85,9 @@ public:
return misaligned_load(addr, sizeof(type##_t)); \
reg_t vpn = addr >> PGSHIFT; \
if (likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) \
- return *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr); \
+ return *(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr); \
if (unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
- type##_t data = *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr); \
+ type##_t data = *(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr); \
if (!matched_trigger) { \
matched_trigger = trigger_exception(OPERATION_LOAD, addr, data); \
if (matched_trigger) \
@@ -114,14 +119,14 @@ public:
return misaligned_store(addr, val, sizeof(type##_t)); \
reg_t vpn = addr >> PGSHIFT; \
if (likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) \
- *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr) = val; \
+ *(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = val; \
else if (unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \
if (!matched_trigger) { \
matched_trigger = trigger_exception(OPERATION_STORE, addr, val); \
if (matched_trigger) \
throw *matched_trigger; \
} \
- *(type##_t*)(tlb_data[vpn % TLB_ENTRIES] + addr) = val; \
+ *(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = val; \
} \
else \
store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&val); \
@@ -165,29 +170,29 @@ public:
inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry)
{
- const uint16_t* iaddr = translate_insn_addr(addr);
- insn_bits_t insn = *iaddr;
+ auto tlb_entry = translate_insn_addr(addr);
+ insn_bits_t insn = *(uint16_t*)(tlb_entry.host_offset + addr);
int length = insn_length(insn);
if (likely(length == 4)) {
- insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 2) << 16;
+ insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr_to_host(addr + 2) << 16;
} else if (length == 2) {
insn = (int16_t)insn;
} else if (length == 6) {
- insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 4) << 32;
- insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;
+ insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr_to_host(addr + 4) << 32;
+ insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr_to_host(addr + 2) << 16;
} else {
static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t");
- insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr(addr + 6) << 48;
- insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 4) << 32;
- insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr(addr + 2) << 16;
+ insn |= (insn_bits_t)*(const int16_t*)translate_insn_addr_to_host(addr + 6) << 48;
+ insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr_to_host(addr + 4) << 32;
+ insn |= (insn_bits_t)*(const uint16_t*)translate_insn_addr_to_host(addr + 2) << 16;
}
insn_fetch_t fetch = {proc->decode_insn(insn), insn};
entry->tag = addr;
entry->data = fetch;
- reg_t paddr = sim->mem_to_addr((char*)iaddr);
+ reg_t paddr = tlb_entry.target_offset + addr;
if (tracer.interested_in_range(paddr, paddr + 1, FETCH)) {
entry->tag = -1;
tracer.trace(paddr, length, FETCH);
@@ -228,39 +233,43 @@ private:
// If a TLB tag has TLB_CHECK_TRIGGERS set, then the MMU must check for a
// trigger match before completing an access.
static const reg_t TLB_CHECK_TRIGGERS = reg_t(1) << 63;
- char* tlb_data[TLB_ENTRIES];
+ tlb_entry_t tlb_data[TLB_ENTRIES];
reg_t tlb_insn_tag[TLB_ENTRIES];
reg_t tlb_load_tag[TLB_ENTRIES];
reg_t tlb_store_tag[TLB_ENTRIES];
// finish translation on a TLB miss and update the TLB
- void refill_tlb(reg_t vaddr, reg_t paddr, access_type type);
+ tlb_entry_t refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type);
const char* fill_from_mmio(reg_t vaddr, reg_t paddr);
// perform a page table walk for a given VA; set referenced/dirty bits
reg_t walk(reg_t addr, access_type type, reg_t prv);
// handle uncommon cases: TLB misses, page faults, MMIO
- const uint16_t* fetch_slow_path(reg_t addr);
+ tlb_entry_t fetch_slow_path(reg_t addr);
void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes);
void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes);
reg_t translate(reg_t addr, access_type type);
// ITLB lookup
- inline const uint16_t* translate_insn_addr(reg_t addr) {
+ inline tlb_entry_t translate_insn_addr(reg_t addr) {
reg_t vpn = addr >> PGSHIFT;
if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn))
- return (uint16_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
+ return tlb_data[vpn % TLB_ENTRIES];
if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) {
- uint16_t* ptr = (uint16_t*)(tlb_data[vpn % TLB_ENTRIES] + addr);
+ uint16_t* ptr = (uint16_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr);
int match = proc->trigger_match(OPERATION_EXECUTE, addr, *ptr);
if (match >= 0)
throw trigger_matched_t(match, OPERATION_EXECUTE, addr, *ptr);
- return ptr;
+ return tlb_data[vpn % TLB_ENTRIES];
}
return fetch_slow_path(addr);
}
+ inline const uint16_t* translate_insn_addr_to_host(reg_t addr) {
+ return (uint16_t*)(translate_insn_addr(addr).host_offset + addr);
+ }
+
inline trigger_matched_t *trigger_exception(trigger_operation_t operation,
reg_t address, reg_t data)
{
diff --git a/riscv/sim.h b/riscv/sim.h
index 8586bee..edf15db 100644
--- a/riscv/sim.h
+++ b/riscv/sim.h
@@ -60,8 +60,7 @@ private:
bool addr_is_mem(reg_t addr) {
return addr >= DRAM_BASE && addr < DRAM_BASE + memsz;
}
- char* addr_to_mem(reg_t addr) { return mem + addr - DRAM_BASE; }
- reg_t mem_to_addr(char* x) { return x - mem + DRAM_BASE; }
+ char* addr_to_mem(reg_t addr) { return addr_is_mem(addr) ? mem + addr - DRAM_BASE : 0; }
bool mmio_load(reg_t addr, size_t len, uint8_t* bytes);
bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes);
void make_dtb();
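The sim.h hunk folds the old addr_is_mem() range check into addr_to_mem(), which now returns a null pointer for addresses outside DRAM, so callers test the returned pointer instead of checking and translating separately. A small self-contained sketch of that pattern (hypothetical base and size, not the simulator's configuration):

#include <cstdint>
#include <cstdio>

static char dram[1 << 16];                  // stand-in for simulated DRAM
constexpr uint64_t DRAM_BASE = 0x80000000;  // hypothetical base address

// One lookup both validates the address and yields the host pointer,
// mirroring the new addr_to_mem() contract.
char* addr_to_mem(uint64_t addr) {
  return (addr >= DRAM_BASE && addr < DRAM_BASE + sizeof(dram))
             ? dram + (addr - DRAM_BASE)
             : nullptr;
}

int main() {
  if (char* p = addr_to_mem(DRAM_BASE + 0x40)) {
    *p = 42;                                // in range: direct host access
    std::printf("DRAM byte = %d\n", *p);
  }
  if (!addr_to_mem(0x10000000))             // out of range: caller falls back
    std::puts("not DRAM: fall back to MMIO handling");
  return 0;
}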