author    Anup Patel <anup.patel@wdc.com>    2020-06-21 23:30:58 +0530
committer Anup Patel <anup@brainfault.org>   2020-07-09 23:04:16 +0530
commit    b75aff9e5d132a16d9326bc0b3bbc724ae3c753c (patch)
tree      01500078707b981ec6af402e519090b0e88b1903 /riscv/mmu.h
parent    9af85e39a550ba031e4fe9c1913e275959a9927b (diff)
Implement hypervisor two-stage MMU
We extend our existing MMU implementation to support two-stage translation when running in VS-mode for the RISC-V hypervisor extension.

Signed-off-by: Anup Patel <anup.patel@wdc.com>
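For reference, a minimal conceptual sketch of what the two stages do (illustrative only; vs_stage_walk and g_stage_walk are invented names here, while the real work in this patch is done by mmu_t::walk() and the new mmu_t::s2xlate()):

// Illustrative sketch, not the mmu.cc implementation. With virtualization
// enabled (V=1), a guest virtual address (GVA) is translated twice:
//   VS-stage:  GVA -> GPA, using the vsatp page table
//   G-stage:   GPA -> HPA, using the hgatp page table
// Every page-table access made by the VS-stage walk is itself a guest
// physical address, so it must also be run through the G-stage.
reg_t two_stage_translate(reg_t gva, access_type type)
{
  reg_t gpa = vs_stage_walk(gva, type);       // hypothetical VS-stage walk
  reg_t hpa = g_stage_walk(gva, gpa, type);   // role played by the new s2xlate()
  return hpa;
}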
Diffstat (limited to 'riscv/mmu.h')
-rw-r--r--  riscv/mmu.h  120
1 file changed, 84 insertions, 36 deletions
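The macro changes below give load_func()/store_func() a prefix and an xlate_flags parameter, so the same template now also generates guest accessors (guest_load_*, guest_store_*, and the MXR-honouring guest_load_x_* variants) alongside the existing load_*/store_* family. A hedged usage sketch follows; the mmu handle and the mapping to the HLV/HLVX hypervisor instructions are assumptions, not part of this patch:

// Sketch only, assuming an mmu_t* handle named mmu.
uint32_t w  = mmu->load_uint32(addr);          // normal access, xlate_flags == 0
uint32_t gw = mmu->guest_load_uint32(addr);    // forced two-stage (VS + G) translation
uint32_t gx = mmu->guest_load_x_uint32(addr);  // two-stage + MXR, e.g. for HLVX.WU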
diff --git a/riscv/mmu.h b/riscv/mmu.h
index edf38ed..990f137 100644
--- a/riscv/mmu.h
+++ b/riscv/mmu.h
@@ -87,9 +87,14 @@ public:
proc->state.log_mem_read.push_back(std::make_tuple(addr, 0, size));
#endif
+#define RISCV_XLATE_VIRT (1U << 0)
+#define RISCV_XLATE_VIRT_MXR (1U << 1)
+
// template for functions that load an aligned value from memory
- #define load_func(type) \
- inline type##_t load_##type(reg_t addr) { \
+ #define load_func(type, prefix, xlate_flags) \
+ inline type##_t prefix##_##type(reg_t addr) { \
+ if (xlate_flags) \
+ flush_tlb(); \
if (unlikely(addr & (sizeof(type##_t)-1))) \
return misaligned_load(addr, sizeof(type##_t)); \
reg_t vpn = addr >> PGSHIFT; \
@@ -109,22 +114,38 @@ public:
return data; \
} \
type##_t res; \
- load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res); \
+ load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res, (xlate_flags)); \
if (proc) READ_MEM(addr, size); \
+ if (xlate_flags) \
+ flush_tlb(); \
return from_le(res); \
}
// load value from memory at aligned address; zero extend to register width
- load_func(uint8)
- load_func(uint16)
- load_func(uint32)
- load_func(uint64)
+ load_func(uint8, load, 0)
+ load_func(uint16, load, 0)
+ load_func(uint32, load, 0)
+ load_func(uint64, load, 0)
+
+ // load value from guest memory at aligned address; zero extend to register width
+ load_func(uint8, guest_load, RISCV_XLATE_VIRT)
+ load_func(uint16, guest_load, RISCV_XLATE_VIRT)
+ load_func(uint32, guest_load, RISCV_XLATE_VIRT)
+ load_func(uint64, guest_load, RISCV_XLATE_VIRT)
+ load_func(uint16, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_MXR)
+ load_func(uint32, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_MXR)
// load value from memory at aligned address; sign extend to register width
- load_func(int8)
- load_func(int16)
- load_func(int32)
- load_func(int64)
+ load_func(int8, load, 0)
+ load_func(int16, load, 0)
+ load_func(int32, load, 0)
+ load_func(int64, load, 0)
+
+ // load value from guest memory at aligned address; sign extend to register width
+ load_func(int8, guest_load, RISCV_XLATE_VIRT)
+ load_func(int16, guest_load, RISCV_XLATE_VIRT)
+ load_func(int32, guest_load, RISCV_XLATE_VIRT)
+ load_func(int64, guest_load, RISCV_XLATE_VIRT)
#ifndef RISCV_ENABLE_COMMITLOG
# define WRITE_MEM(addr, value, size) ({})
@@ -134,8 +155,10 @@ public:
#endif
// template for functions that store an aligned value to memory
- #define store_func(type) \
- void store_##type(reg_t addr, type##_t val) { \
+ #define store_func(type, prefix, xlate_flags) \
+ void prefix##_##type(reg_t addr, type##_t val) { \
+ if (xlate_flags) \
+ flush_tlb(); \
if (unlikely(addr & (sizeof(type##_t)-1))) \
return misaligned_store(addr, val, sizeof(type##_t)); \
reg_t vpn = addr >> PGSHIFT; \
@@ -154,10 +177,12 @@ public:
*(type##_t*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_le(val); \
} \
else { \
- type##_t le_val = to_le(val); \
- store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&le_val); \
+ type##_t le_val = to_le(val); \
+ store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&le_val, (xlate_flags)); \
if (proc) WRITE_MEM(addr, val, size); \
} \
+ if (xlate_flags) \
+ flush_tlb(); \
}
// template for functions that perform an atomic memory operation
@@ -199,10 +224,16 @@ public:
}
// store value to memory at aligned address
- store_func(uint8)
- store_func(uint16)
- store_func(uint32)
- store_func(uint64)
+ store_func(uint8, store, 0)
+ store_func(uint16, store, 0)
+ store_func(uint32, store, 0)
+ store_func(uint64, store, 0)
+
+ // store value to guest memory at aligned address
+ store_func(uint8, guest_store, RISCV_XLATE_VIRT)
+ store_func(uint16, guest_store, RISCV_XLATE_VIRT)
+ store_func(uint32, guest_store, RISCV_XLATE_VIRT)
+ store_func(uint64, guest_store, RISCV_XLATE_VIRT)
// perform an atomic memory operation at an aligned address
amo_func(uint32)
@@ -215,7 +246,7 @@ public:
inline void acquire_load_reservation(reg_t vaddr)
{
- reg_t paddr = translate(vaddr, 1, LOAD);
+ reg_t paddr = translate(vaddr, 1, LOAD, 0);
if (auto host_addr = sim->addr_to_mem(paddr))
load_reservation_address = refill_tlb(vaddr, paddr, host_addr, LOAD).target_offset + vaddr;
else
@@ -227,7 +258,7 @@ public:
if (vaddr & (size-1))
throw trap_store_address_misaligned(vaddr, 0, 0);
- reg_t paddr = translate(vaddr, 1, STORE);
+ reg_t paddr = translate(vaddr, 1, STORE, 0);
if (auto host_addr = sim->addr_to_mem(paddr))
return load_reservation_address == refill_tlb(vaddr, paddr, host_addr, STORE).target_offset + vaddr;
else
@@ -335,17 +366,20 @@ private:
tlb_entry_t refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type);
const char* fill_from_mmio(reg_t vaddr, reg_t paddr);
+ // perform a stage2 translation for a given guest address
+ reg_t s2xlate(reg_t gva, reg_t gpa, access_type type, bool virt, bool mxr);
+
// perform a page table walk for a given VA; set referenced/dirty bits
- reg_t walk(reg_t addr, access_type type, reg_t prv);
+ reg_t walk(reg_t addr, access_type type, reg_t prv, bool virt, bool mxr);
// handle uncommon cases: TLB misses, page faults, MMIO
tlb_entry_t fetch_slow_path(reg_t addr);
- void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes);
- void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes);
+ void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags);
+ void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags);
bool mmio_load(reg_t addr, size_t len, uint8_t* bytes);
bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes);
bool mmio_ok(reg_t addr, access_type type);
- reg_t translate(reg_t addr, reg_t len, access_type type);
+ reg_t translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags);
// ITLB lookup
inline tlb_entry_t translate_insn_addr(reg_t addr) {
@@ -402,27 +436,41 @@ private:
struct vm_info {
int levels;
int idxbits;
+ int widenbits;
int ptesize;
reg_t ptbase;
};
-inline vm_info decode_vm_info(int xlen, reg_t prv, reg_t satp)
+inline vm_info decode_vm_info(int xlen, bool stage2, reg_t prv, reg_t satp)
{
if (prv == PRV_M) {
- return {0, 0, 0, 0};
- } else if (prv <= PRV_S && xlen == 32) {
+ return {0, 0, 0, 0, 0};
+ } else if (!stage2 && prv <= PRV_S && xlen == 32) {
switch (get_field(satp, SATP32_MODE)) {
- case SATP_MODE_OFF: return {0, 0, 0, 0};
- case SATP_MODE_SV32: return {2, 10, 4, (satp & SATP32_PPN) << PGSHIFT};
+ case SATP_MODE_OFF: return {0, 0, 0, 0, 0};
+ case SATP_MODE_SV32: return {2, 10, 0, 4, (satp & SATP32_PPN) << PGSHIFT};
default: abort();
}
- } else if (prv <= PRV_S && xlen == 64) {
+ } else if (!stage2 && prv <= PRV_S && xlen == 64) {
switch (get_field(satp, SATP64_MODE)) {
- case SATP_MODE_OFF: return {0, 0, 0, 0};
- case SATP_MODE_SV39: return {3, 9, 8, (satp & SATP64_PPN) << PGSHIFT};
- case SATP_MODE_SV48: return {4, 9, 8, (satp & SATP64_PPN) << PGSHIFT};
- case SATP_MODE_SV57: return {5, 9, 8, (satp & SATP64_PPN) << PGSHIFT};
- case SATP_MODE_SV64: return {6, 9, 8, (satp & SATP64_PPN) << PGSHIFT};
+ case SATP_MODE_OFF: return {0, 0, 0, 0, 0};
+ case SATP_MODE_SV39: return {3, 9, 0, 8, (satp & SATP64_PPN) << PGSHIFT};
+ case SATP_MODE_SV48: return {4, 9, 0, 8, (satp & SATP64_PPN) << PGSHIFT};
+ case SATP_MODE_SV57: return {5, 9, 0, 8, (satp & SATP64_PPN) << PGSHIFT};
+ case SATP_MODE_SV64: return {6, 9, 0, 8, (satp & SATP64_PPN) << PGSHIFT};
+ default: abort();
+ }
+ } else if (stage2 && xlen == 32) {
+ switch (get_field(satp, HGATP32_MODE)) {
+ case HGATP_MODE_OFF: return {0, 0, 0, 0, 0};
+ case HGATP_MODE_SV32X4: return {2, 10, 2, 4, (satp & HGATP32_PPN) << PGSHIFT};
+ default: abort();
+ }
+ } else if (stage2 && xlen == 64) {
+ switch (get_field(satp, HGATP64_MODE)) {
+ case HGATP_MODE_OFF: return {0, 0, 0, 0, 0};
+ case HGATP_MODE_SV39X4: return {3, 9, 2, 8, (satp & HGATP64_PPN) << PGSHIFT};
+ case HGATP_MODE_SV48X4: return {4, 9, 2, 8, (satp & HGATP64_PPN) << PGSHIFT};
default: abort();
}
} else {