aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Makefile4
-rw-r--r--model/riscv_types.sail3
-rw-r--r--model/riscv_vmem_rv32.sail52
-rw-r--r--model/riscv_vmem_rv64.sail (renamed from model/riscv_vmem.sail)4
-rw-r--r--model/riscv_vmem_sv32.sail112
-rw-r--r--model/riscv_vmem_sv39.sail1
-rw-r--r--model/riscv_vmem_sv48.sail106
-rw-r--r--model/riscv_vmem_tlb.sail2
8 files changed, 280 insertions, 4 deletions
diff --git a/Makefile b/Makefile
index 2c980bf..0958c48 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,9 @@ SAIL_RMEM_INST_SRCS = riscv_insts_begin.sail $(SAIL_RMEM_INST) riscv_insts_end.s
SAIL_SYS_SRCS = riscv_csr_map.sail riscv_sys_regs.sail riscv_next_regs.sail riscv_next_control.sail riscv_sys_control.sail
-SAIL_VM_SRCS = riscv_vmem_common.sail riscv_vmem_tlb.sail riscv_vmem_sv39.sail riscv_vmem.sail
+SAIL_RV32_VM_SRCS = riscv_vmem_sv32.sail riscv_vmem_rv32.sail
+SAIL_RV64_VM_SRCS = riscv_vmem_sv39.sail riscv_vmem_sv48.sail riscv_vmem_rv64.sail
+SAIL_VM_SRCS = riscv_vmem_common.sail riscv_vmem_tlb.sail $(SAIL_RV64_VM_SRCS)
# non-instruction sources
PRELUDE = prelude.sail prelude_mapping.sail riscv_xlen.sail prelude_mem.sail
diff --git a/model/riscv_types.sail b/model/riscv_types.sail
index cf3edac..c0e538f 100644
--- a/model/riscv_types.sail
+++ b/model/riscv_types.sail
@@ -435,13 +435,14 @@ function extStatus_of_bits(e) =
/* supervisor-level address translation modes */
type satp_mode = bits(4)
-enum SATPMode = {Sbare, Sv32, Sv39}
+enum SATPMode = {Sbare, Sv32, Sv39, Sv48}
function satp64Mode_of_bits(a : Architecture, m : satp_mode) -> option(SATPMode) =
match (a, m) {
(_, 0x0) => Some(Sbare),
(RV32, 0x1) => Some(Sv32),
(RV64, 0x8) => Some(Sv39),
+ (RV64, 0x9) => Some(Sv48),
(_, _) => None()
}
diff --git a/model/riscv_vmem_rv32.sail b/model/riscv_vmem_rv32.sail
new file mode 100644
index 0000000..539a879
--- /dev/null
+++ b/model/riscv_vmem_rv32.sail
@@ -0,0 +1,52 @@
+/* RV32 Supervisor-mode address translation and page-table walks. */
+
+/* Define the architectural satp and its legalizer. */
+
+register satp : xlenbits
+
+function legalize_satp(a : Architecture, o : xlenbits, v : xlenbits) -> xlenbits =
+ legalize_satp32(a, o, v)
+
+/* Compute the address translation mode. */
+
+val translationMode : (Privilege) -> SATPMode effect {rreg, escape}
+function translationMode(priv) = {
+ if priv == Machine then Sbare
+ else {
+ let arch = architecture(get_mstatus_SXL(mstatus));
+ match arch {
+ Some(RV32) => {
+ let s = Mk_Satp32(satp[31..0]);
+ if s.Mode() == false then Sbare else Sv32
+ },
+ _ => internal_error("unsupported address translation arch")
+ }
+ }
+}
+
+/* Top-level address translation dispatcher */
+
+val translateAddr : (xlenbits, AccessType, ReadType) -> TR_Result(xlenbits, ExceptionType) effect {escape, rmem, rreg, wmv, wreg}
+function translateAddr(vAddr, ac, rt) = {
+ let effPriv : Privilege = match rt {
+ Instruction => cur_privilege,
+ Data => if mstatus.MPRV() == true
+ then privLevel_of_bits(mstatus.MPP())
+ else cur_privilege
+ };
+ let mxr : bool = mstatus.MXR() == true;
+ let do_sum : bool = mstatus.SUM() == true;
+ let mode : SATPMode = translationMode(effPriv);
+
+ let asid = curAsid32(satp);
+ let ptb = curPTB32(satp);
+
+ match mode {
+ Sbare => TR_Address(vAddr),
+ Sv32 => match translate32(asid, ptb, vAddr, ac, effPriv, mxr, do_sum, SV32_LEVELS - 1) {
+ TR_Address(pa) => TR_Address(to_phys_addr(pa)),
+ TR_Failure(f) => TR_Failure(translationException(ac, f))
+ },
+ _ => internal_error("unsupported address translation scheme")
+ }
+}
diff --git a/model/riscv_vmem.sail b/model/riscv_vmem_rv64.sail
index 33f94dd..c7d9048 100644
--- a/model/riscv_vmem.sail
+++ b/model/riscv_vmem_rv64.sail
@@ -54,6 +54,10 @@ function translateAddr(vAddr, ac, rt) = {
TR_Address(pa) => TR_Address(EXTZ(pa)),
TR_Failure(f) => TR_Failure(translationException(ac, f))
},
+ Sv48 => match translate48(asid, ptb, vAddr[47 .. 0], ac, effPriv, mxr, do_sum, SV48_LEVELS - 1) {
+ TR_Address(pa) => TR_Address(EXTZ(pa)),
+ TR_Failure(f) => TR_Failure(translationException(ac, f))
+ },
_ => internal_error("unsupported address translation scheme")
}
}
diff --git a/model/riscv_vmem_sv32.sail b/model/riscv_vmem_sv32.sail
new file mode 100644
index 0000000..d7addee
--- /dev/null
+++ b/model/riscv_vmem_sv32.sail
@@ -0,0 +1,112 @@
+/* Sv32 address translation for RV32. */
+
+/* FIXME: paddr32 is 34-bits, but phys_mem accesses in riscv_mem take 32-bit (xlenbits) addresses.
+ * Define a converter for now.
+ */
+
+function to_phys_addr(a : paddr32) -> xlenbits = a[31..0]
+
+val walk32 : (vaddr32, AccessType, Privilege, bool, bool, paddr32, nat, bool) -> PTW_Result(paddr32, SV32_PTE) effect {rmem, escape}
+function walk32(vaddr, ac, priv, mxr, do_sum, ptb, level, global) = {
+ let va = Mk_SV32_Vaddr(vaddr);
+ let pt_ofs : paddr32 = shiftl(EXTZ(shiftr(va.VPNi(), (level * SV32_LEVEL_BITS))[(SV32_LEVEL_BITS - 1) .. 0]),
+ PTE32_LOG_SIZE);
+ let pte_addr = ptb + pt_ofs;
+ /* FIXME: we assume here that walks only access physical-memory-backed addresses, and not MMIO regions. */
+ match (phys_mem_read(Data, to_phys_addr(pte_addr), 4, false, false, false)) {
+ MemException(_) => {
+/* print("walk32(vaddr=" ^ BitStr(vaddr) ^ " level=" ^ string_of_int(level)
+ ^ " pt_base=" ^ BitStr(ptb)
+ ^ " pt_ofs=" ^ BitStr(pt_ofs)
+ ^ " pte_addr=" ^ BitStr(pte_addr)
+ ^ ": invalid pte address"); */
+ PTW_Failure(PTW_Access)
+ },
+ MemValue(v) => {
+ let pte = Mk_SV32_PTE(v);
+ let pbits = pte.BITS();
+ let pattr = Mk_PTE_Bits(pbits);
+ let is_global = global | (pattr.G() == true);
+/* print("walk32(vaddr=" ^ BitStr(vaddr) ^ " level=" ^ string_of_int(level)
+ ^ " pt_base=" ^ BitStr(ptb)
+ ^ " pt_ofs=" ^ BitStr(pt_ofs)
+ ^ " pte_addr=" ^ BitStr(pte_addr)
+ ^ " pte=" ^ BitStr(v)); */
+ if isInvalidPTE(pbits) then {
+/* print("walk32: invalid pte"); */
+ PTW_Failure(PTW_Invalid_PTE)
+ } else {
+ if isPTEPtr(pbits) then {
+ if level == 0 then {
+ /* last-level PTE contains a pointer instead of a leaf */
+/* print("walk32: last-level pte contains a ptr"); */
+ PTW_Failure(PTW_Invalid_PTE)
+ } else {
+ /* walk down the pointer to the next level */
+ walk32(vaddr, ac, priv, mxr, do_sum, EXTZ(shiftl(pte.PPNi(), PAGESIZE_BITS)), level - 1, is_global)
+ }
+ } else { /* leaf PTE */
+ if ~ (checkPTEPermission(ac, priv, mxr, do_sum, pattr)) then {
+/* print("walk32: pte permission check failure"); */
+ PTW_Failure(PTW_No_Permission)
+ } else {
+ if level > 0 then { /* superpage */
+ /* fixme hack: to get a mask of appropriate size */
+ let mask = shiftl(pte.PPNi() ^ pte.PPNi() ^ EXTZ(0b1), level * SV32_LEVEL_BITS) - 1;
+ if (pte.PPNi() & mask) != EXTZ(0b0) then {
+ /* misaligned superpage mapping */
+/* print("walk32: misaligned superpage mapping"); */
+ PTW_Failure(PTW_Misaligned)
+ } else {
+ /* add the appropriate bits of the VPN to the superpage PPN */
+ let ppn = pte.PPNi() | (EXTZ(va.VPNi()) & mask);
+/* let res = append(ppn, va.PgOfs());
+ print("walk32: using superpage: pte.ppn=" ^ BitStr(pte.PPNi())
+ ^ " ppn=" ^ BitStr(ppn) ^ " res=" ^ BitStr(res)); */
+ PTW_Success(append(ppn, va.PgOfs()), pte, pte_addr, level, is_global)
+ }
+ } else {
+ /* normal leaf PTE */
+/* let res = append(pte.PPNi(), va.PgOfs());
+ print("walk32: pte.ppn=" ^ BitStr(pte.PPNi()) ^ " ppn=" ^ BitStr(pte.PPNi()) ^ " res=" ^ BitStr(res)); */
+ PTW_Success(append(pte.PPNi(), va.PgOfs()), pte, pte_addr, level, is_global)
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+val translate32 : (asid32, paddr32, vaddr32, AccessType, Privilege, bool, bool, nat) -> TR_Result(paddr32, PTW_Error) effect {rreg, wreg, wmv, escape, rmem}
+function translate32(asid, ptb, vAddr, ac, priv, mxr, do_sum, level) = {
+ match walk32(vAddr, ac, priv, mxr, do_sum, ptb, level, false) {
+ PTW_Failure(f) => TR_Failure(f),
+ PTW_Success(pAddr, pte, pteAddr, level, global) => {
+ match update_PTE_Bits(Mk_PTE_Bits(pte.BITS()), ac) {
+ None() => {
+ /* addToTLB32(asid, vAddr, pAddr, pte, pteAddr, level, global); */
+ TR_Address(pAddr)
+ },
+ Some(pbits) =>
+ if ~ (plat_enable_dirty_update ())
+ then {
+ /* pte needs dirty/accessed update but that is not enabled */
+ TR_Failure(PTW_PTE_Update)
+ } else {
+ w_pte : SV32_PTE = update_BITS(pte, pbits.bits());
+ match checked_mem_write(to_phys_addr(pteAddr), 4, w_pte.bits(), false, false, false) {
+ MemValue(_) => {
+ /* addToTLB32(asid, vAddr, pAddr, w_pte, pteAddr, level, global); */
+ TR_Address(pAddr)
+ },
+ MemException(e) => {
+ /* pte is not in valid memory */
+ TR_Failure(PTW_Access)
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/model/riscv_vmem_sv39.sail b/model/riscv_vmem_sv39.sail
index 7ef5ef0..fca37a5 100644
--- a/model/riscv_vmem_sv39.sail
+++ b/model/riscv_vmem_sv39.sail
@@ -104,4 +104,3 @@ function translate39(asid, ptb, vAddr, ac, priv, mxr, do_sum, level) = {
}
}
}
-
diff --git a/model/riscv_vmem_sv48.sail b/model/riscv_vmem_sv48.sail
new file mode 100644
index 0000000..b29ae52
--- /dev/null
+++ b/model/riscv_vmem_sv48.sail
@@ -0,0 +1,106 @@
+/* Sv48 address translation for RV64. */
+
+val walk48 : (vaddr48, AccessType, Privilege, bool, bool, paddr64, nat, bool) -> PTW_Result(paddr64, SV48_PTE) effect {rmem, escape}
+function walk48(vaddr, ac, priv, mxr, do_sum, ptb, level, global) = {
+ let va = Mk_SV48_Vaddr(vaddr);
+ let pt_ofs : paddr64 = shiftl(EXTZ(shiftr(va.VPNi(), (level * SV48_LEVEL_BITS))[(SV48_LEVEL_BITS - 1) .. 0]),
+ PTE48_LOG_SIZE);
+ let pte_addr = ptb + pt_ofs;
+ /* FIXME: we assume here that walks only access physical-memory-backed addresses, and not MMIO regions. */
+ match (phys_mem_read(Data, EXTZ(pte_addr), 8, false, false, false)) {
+ MemException(_) => {
+/* print("walk48(vaddr=" ^ BitStr(vaddr) ^ " level=" ^ string_of_int(level)
+ ^ " pt_base=" ^ BitStr(ptb)
+ ^ " pt_ofs=" ^ BitStr(pt_ofs)
+ ^ " pte_addr=" ^ BitStr(pte_addr)
+ ^ ": invalid pte address"); */
+ PTW_Failure(PTW_Access)
+ },
+ MemValue(v) => {
+ let pte = Mk_SV48_PTE(v);
+ let pbits = pte.BITS();
+ let pattr = Mk_PTE_Bits(pbits);
+ let is_global = global | (pattr.G() == true);
+/* print("walk48(vaddr=" ^ BitStr(vaddr) ^ " level=" ^ string_of_int(level)
+ ^ " pt_base=" ^ BitStr(ptb)
+ ^ " pt_ofs=" ^ BitStr(pt_ofs)
+ ^ " pte_addr=" ^ BitStr(pte_addr)
+ ^ " pte=" ^ BitStr(v)); */
+ if isInvalidPTE(pbits) then {
+/* print("walk48: invalid pte"); */
+ PTW_Failure(PTW_Invalid_PTE)
+ } else {
+ if isPTEPtr(pbits) then {
+ if level == 0 then {
+ /* last-level PTE contains a pointer instead of a leaf */
+/* print("walk48: last-level pte contains a ptr"); */
+ PTW_Failure(PTW_Invalid_PTE)
+ } else {
+ /* walk down the pointer to the next level */
+ walk48(vaddr, ac, priv, mxr, do_sum, EXTZ(shiftl(pte.PPNi(), PAGESIZE_BITS)), level - 1, is_global)
+ }
+ } else { /* leaf PTE */
+ if ~ (checkPTEPermission(ac, priv, mxr, do_sum, pattr)) then {
+/* print("walk48: pte permission check failure"); */
+ PTW_Failure(PTW_No_Permission)
+ } else {
+ if level > 0 then { /* superpage */
+ /* fixme hack: to get a mask of appropriate size */
+ let mask = shiftl(pte.PPNi() ^ pte.PPNi() ^ EXTZ(0b1), level * SV48_LEVEL_BITS) - 1;
+ if (pte.PPNi() & mask) != EXTZ(0b0) then {
+ /* misaligned superpage mapping */
+/* print("walk48: misaligned superpage mapping"); */
+ PTW_Failure(PTW_Misaligned)
+ } else {
+ /* add the appropriate bits of the VPN to the superpage PPN */
+ let ppn = pte.PPNi() | (EXTZ(va.VPNi()) & mask);
+/* let res = append(ppn, va.PgOfs());
+ print("walk48: using superpage: pte.ppn=" ^ BitStr(pte.PPNi())
+ ^ " ppn=" ^ BitStr(ppn) ^ " res=" ^ BitStr(res)); */
+ PTW_Success(append(ppn, va.PgOfs()), pte, pte_addr, level, is_global)
+ }
+ } else {
+ /* normal leaf PTE */
+/* let res = append(pte.PPNi(), va.PgOfs());
+ print("walk48: pte.ppn=" ^ BitStr(pte.PPNi()) ^ " ppn=" ^ BitStr(pte.PPNi()) ^ " res=" ^ BitStr(res)); */
+ PTW_Success(append(pte.PPNi(), va.PgOfs()), pte, pte_addr, level, is_global)
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+val translate48 : (asid64, paddr64, vaddr48, AccessType, Privilege, bool, bool, nat) -> TR_Result(paddr64, PTW_Error) effect {rreg, wreg, wmv, escape, rmem}
+function translate48(asid, ptb, vAddr, ac, priv, mxr, do_sum, level) = {
+ match walk48(vAddr, ac, priv, mxr, do_sum, ptb, level, false) {
+ PTW_Failure(f) => TR_Failure(f),
+ PTW_Success(pAddr, pte, pteAddr, level, global) => {
+ match update_PTE_Bits(Mk_PTE_Bits(pte.BITS()), ac) {
+ None() => {
+ /* addToTLB48(asid, vAddr, pAddr, pte, pteAddr, level, global); */
+ TR_Address(pAddr)
+ },
+ Some(pbits) =>
+ if ~ (plat_enable_dirty_update ())
+ then {
+ /* pte needs dirty/accessed update but that is not enabled */
+ TR_Failure(PTW_PTE_Update)
+ } else {
+ w_pte : SV48_PTE = update_BITS(pte, pbits.bits());
+ match checked_mem_write(EXTZ(pteAddr), 8, w_pte.bits(), false, false, false) {
+ MemValue(_) => {
+ /* addToTLB48(asid, vAddr, pAddr, w_pte, pteAddr, level, global); */
+ TR_Address(pAddr)
+ },
+ MemException(e) => {
+ /* pte is not in valid memory */
+ TR_Failure(PTW_Access)
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/model/riscv_vmem_tlb.sail b/model/riscv_vmem_tlb.sail
index 5e6b128..a9663f5 100644
--- a/model/riscv_vmem_tlb.sail
+++ b/model/riscv_vmem_tlb.sail
@@ -13,7 +13,7 @@ struct TLB39_Entry = {
vAddrMask : vaddr39, /* selection mask for superpages */
pte : SV39_PTE, /* permissions */
pteAddr : paddr64, /* for dirty writeback */
- age : xlenbits
+ age : bits(64)
}
/* the rreg effect is an artifact of using the cycle counter to provide the age */