| author | Prashanth Mundkur <prashanth.mundkur@gmail.com> | 2018-06-07 12:56:29 -0700 |
|---|---|---|
| committer | Prashanth Mundkur <prashanth.mundkur@gmail.com> | 2018-06-07 13:13:23 -0700 |
| commit | 62b3c1cd55dc9c83367a2f3e1a664127e0e4f354 (patch) | |
| tree | f846d59a913b37c9c1baa90a8c577a796dda8efc /riscv_mem.sail | |
| parent | da30f57f421b5cdac73204176690871d1d9de3db (diff) | |
Update physical memory and address translation for MMIO.
- Assume for now that atomic accesses go only to memory regions, leaving
their effects unchanged.
- The top-level mem_read and mem_write functions for physical memory now
carry rreg and wreg effects, since MMIO accesses read and write device registers.
It would be nice to have a separate construct for non-CPU-register state, to avoid
polluting the footprint. (A sketch of the memory-map dispatch behind this is
given after this list.)
- Assume for now that page-table walks access only physical memory regions,
not MMIO regions. Leave a FIXME note there to address this later, perhaps when
PMP/PMA support is added.
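
For orientation, below is a minimal sketch of the region predicates that the new checked_mem_read/checked_mem_write dispatch relies on. The names within_phys_mem and within_clint come from the diff itself, but the bounds-checking logic and the base/size constants here are illustrative assumptions (taking RV64, so xlenbits is 64 bits wide), not the model's actual platform definitions:

```sail
/* Illustrative sketch only: the real model defines these predicates and
 * the platform constants elsewhere; the addresses below are made-up
 * placeholders. */

let ram_base   : xlenbits = 0x0000000080000000  /* hypothetical RAM base   */
let ram_size   : xlenbits = 0x0000000040000000  /* hypothetical RAM size   */
let clint_base : xlenbits = 0x0000000002000000  /* hypothetical CLINT base */
let clint_size : xlenbits = 0x00000000000C0000  /* hypothetical CLINT size */

/* An access of 'width' bytes at 'addr' falls within a region iff it lies
 * entirely inside [base, base + size). */
function within_region (addr : xlenbits, width : atom('n), base : xlenbits, size : xlenbits) -> forall 'n. bool =
  unsigned(addr) >= unsigned(base)
  & (unsigned(addr) + width) <= (unsigned(base) + unsigned(size))

function within_phys_mem (addr : xlenbits, width : atom('n)) -> forall 'n. bool =
  within_region(addr, width, ram_base, ram_size)

function within_clint (addr : xlenbits, width : atom('n)) -> forall 'n. bool =
  within_region(addr, width, clint_base, clint_size)
```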
Diffstat (limited to 'riscv_mem.sail')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | riscv_mem.sail | 73 |

1 file changed, 54 insertions, 19 deletions
```diff
diff --git a/riscv_mem.sail b/riscv_mem.sail
index 10fd98f..0fc9ddb 100644
--- a/riscv_mem.sail
+++ b/riscv_mem.sail
@@ -3,13 +3,34 @@
 function is_aligned_addr (addr : xlenbits, width : atom('n)) -> forall 'n. bool =
   unsigned(addr) % width == 0
 
-function checked_mem_read(t : ReadType, addr : xlenbits, width : atom('n)) -> forall 'n. MemoryOpResult(bits(8 * 'n)) =
+// only used for actual memory regions, to avoid MMIO effects
+function phys_mem_read(t : ReadType, addr : xlenbits, width : atom('n)) -> forall 'n. MemoryOpResult(bits(8 * 'n)) =
   match (t, __RISCV_read(addr, width)) {
     (Instruction, None()) => MemException(E_Fetch_Access_Fault),
     (Data, None())        => MemException(E_Load_Access_Fault),
     (_, Some(v))          => MemValue(v)
   }
 
+function checked_mem_read(t : ReadType, addr : xlenbits, width : atom('n)) -> forall 'n. MemoryOpResult(bits(8 * 'n)) = {
+  /* Consult physical memory map to dispatch MMIO and (TODO) handle PMP/PMA. */
+  if within_phys_mem(addr, width)
+  then phys_mem_read(t, addr, width)
+  /* treat MMIO regions as not executable for now.
+     TODO: this should actually come from PMP/PMA. */
+  else if t == Data then {
+    if within_clint(addr, width)
+    then clint_load(addr, width)
+    /* todo: handle constraint on 'n
+    else if within_htif_readable(addr, width)
+    then htif_load(addr, width)
+    */
+    else MemException(E_Load_Access_Fault)
+  }
+  else MemException(E_Load_Access_Fault)
+}
+
+/* FIXME: We assume atomic accesses are only done to memory-backed regions. MMIO is not modeled. */
+
 val MEMr                         : forall 'n. (xlenbits, atom('n)) -> MemoryOpResult(bits(8 * 'n)) effect {rmem}
 val MEMr_acquire                 : forall 'n. (xlenbits, atom('n)) -> MemoryOpResult(bits(8 * 'n)) effect {rmem}
 val MEMr_strong_acquire          : forall 'n. (xlenbits, atom('n)) -> MemoryOpResult(bits(8 * 'n)) effect {rmem}
@@ -17,20 +38,21 @@ val MEMr_reserved                : forall 'n. (xlenbits, atom('n)) -> MemoryOpResult(bits(8 * 'n)) effect {rmem}
 val MEMr_reserved_acquire        : forall 'n. (xlenbits, atom('n)) -> MemoryOpResult(bits(8 * 'n)) effect {rmem}
 val MEMr_reserved_strong_acquire : forall 'n. (xlenbits, atom('n)) -> MemoryOpResult(bits(8 * 'n)) effect {rmem}
 
-function MEMr                         (addr, width) = checked_mem_read(Data, addr, width)
-function MEMr_acquire                 (addr, width) = checked_mem_read(Data, addr, width)
-function MEMr_strong_acquire          (addr, width) = checked_mem_read(Data, addr, width)
-function MEMr_reserved                (addr, width) = checked_mem_read(Data, addr, width)
-function MEMr_reserved_acquire        (addr, width) = checked_mem_read(Data, addr, width)
-function MEMr_reserved_strong_acquire (addr, width) = checked_mem_read(Data, addr, width)
+function MEMr                         (addr, width) = phys_mem_read(Data, addr, width)
+function MEMr_acquire                 (addr, width) = phys_mem_read(Data, addr, width)
+function MEMr_strong_acquire          (addr, width) = phys_mem_read(Data, addr, width)
+function MEMr_reserved                (addr, width) = phys_mem_read(Data, addr, width)
+function MEMr_reserved_acquire        (addr, width) = phys_mem_read(Data, addr, width)
+function MEMr_reserved_strong_acquire (addr, width) = phys_mem_read(Data, addr, width)
 
-val mem_read : forall 'n. (xlenbits, atom('n), bool, bool, bool) -> MemoryOpResult(bits(8 * 'n)) effect {rmem, escape}
+/* NOTE: The rreg effect is due to MMIO. */
+val mem_read : forall 'n, 'n > 0. (xlenbits, atom('n), bool, bool, bool) -> MemoryOpResult(bits(8 * 'n)) effect {rmem, rreg, escape}
 
 function mem_read (addr, width, aq, rl, res) = {
   if (aq | res) & (~ (is_aligned_addr(addr, width)))
   then MemException(E_Load_Addr_Align)
   else match (aq, rl, res) {
-    (false, false, false) => MEMr(addr, width),
+    (false, false, false) => checked_mem_read(Data, addr, width),
     (true, false, false)  => MEMr_acquire(addr, width),
     (false, false, true)  => MEMr_reserved(addr, width),
     (true, false, true)   => MEMr_reserved_acquire(addr, width),
@@ -71,11 +93,24 @@ function mem_write_ea (addr, width, aq, rl, con) = {
   }
 }
 
-function checked_mem_write(addr : xlenbits, width : atom('n), data: bits(8 * 'n)) -> forall 'n. MemoryOpResult(unit) =
+// only used for actual memory regions, to avoid MMIO effects
+function phys_mem_write(addr : xlenbits, width : atom('n), data: bits(8 * 'n)) -> forall 'n. MemoryOpResult(unit) =
   if (__RISCV_write(addr, width, data))
   then MemValue(())
   else MemException(E_SAMO_Access_Fault)
 
+function checked_mem_write(addr : xlenbits, width : atom('n), data: bits(8 * 'n)) -> forall 'n, 'n > 0. MemoryOpResult(unit) =
+  /* Consult physical memory map to dispatch MMIO and (TODO) handle PMP/PMA. */
+  if within_phys_mem(addr, width)
+  then phys_mem_write(addr, width, data)
+  else if within_clint(addr, width)
+  then clint_store(addr, width, data)
+  else if within_htif_writable(addr, width) & sizeof('n) <= 8 // FIXME
+  then htif_store(addr, width, data)
+  else MemException(E_SAMO_Access_Fault)
+
+/* FIXME: We assume atomic accesses are only done to memory-backed regions. MMIO is not modeled. */
+
 val MEMval                            : forall 'n. (xlenbits, atom('n), bits(8 * 'n)) -> MemoryOpResult(unit) effect {wmv}
 val MEMval_release                    : forall 'n. (xlenbits, atom('n), bits(8 * 'n)) -> MemoryOpResult(unit) effect {wmv}
 val MEMval_strong_release             : forall 'n. (xlenbits, atom('n), bits(8 * 'n)) -> MemoryOpResult(unit) effect {wmv}
@@ -83,21 +118,21 @@ val MEMval_conditional                : forall 'n. (xlenbits, atom('n), bits(8 * 'n)) -> MemoryOpResult(unit) effect {wmv}
 val MEMval_conditional_release        : forall 'n. (xlenbits, atom('n), bits(8 * 'n)) -> MemoryOpResult(unit) effect {wmv}
 val MEMval_conditional_strong_release : forall 'n. (xlenbits, atom('n), bits(8 * 'n)) -> MemoryOpResult(unit) effect {wmv}
 
-function MEMval                            (addr, width, data) = checked_mem_write(addr, width, data)
-function MEMval_release                    (addr, width, data) = checked_mem_write(addr, width, data)
-function MEMval_strong_release             (addr, width, data) = checked_mem_write(addr, width, data)
-function MEMval_conditional                (addr, width, data) = checked_mem_write(addr, width, data)
-function MEMval_conditional_release        (addr, width, data) = checked_mem_write(addr, width, data)
-function MEMval_conditional_strong_release (addr, width, data) = checked_mem_write(addr, width, data)
-
+function MEMval                            (addr, width, data) = phys_mem_write(addr, width, data)
+function MEMval_release                    (addr, width, data) = phys_mem_write(addr, width, data)
+function MEMval_strong_release             (addr, width, data) = phys_mem_write(addr, width, data)
+function MEMval_conditional                (addr, width, data) = phys_mem_write(addr, width, data)
+function MEMval_conditional_release        (addr, width, data) = phys_mem_write(addr, width, data)
+function MEMval_conditional_strong_release (addr, width, data) = phys_mem_write(addr, width, data)
 
-val mem_write_value : forall 'n. (xlenbits, atom('n), bits(8 * 'n), bool, bool, bool) -> MemoryOpResult(unit) effect {wmv, escape}
+/* NOTE: The wreg effect is due to MMIO. */
+val mem_write_value : forall 'n, 'n > 0. (xlenbits, atom('n), bits(8 * 'n), bool, bool, bool) -> MemoryOpResult(unit) effect {wmv, wreg, escape}
 
 function mem_write_value (addr, width, value, aq, rl, con) = {
   if (rl | con) & (~ (is_aligned_addr(addr, width)))
   then MemException(E_SAMO_Addr_Align)
   else match (aq, rl, con) {
-    (false, false, false) => MEMval(addr, width, value),
+    (false, false, false) => checked_mem_write(addr, width, value),
     (false, true, false)  => MEMval_release(addr, width, value),
     (false, false, true)  => MEMval_conditional(addr, width, value),
     (false, true, true)   => MEMval_conditional_release(addr, width, value),
```
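
As a usage illustration of the new split, the sketch below traces how three loads would dispatch after this change. The addresses are hypothetical placeholders matching the sketch above, and dispatch_examples is not part of the model:

```sail
val dispatch_examples : unit -> unit effect {rmem, rreg, escape}

function dispatch_examples () = {
  /* A plain load to a (hypothetical) CLINT address now routes through
     checked_mem_read and reaches clint_load, hence the new rreg effect. */
  let mmio_read = mem_read(0x0000000002000000, 8, false, false, false);

  /* A load-reserved (res = true) to the same address dispatches to
     MEMr_reserved, i.e. phys_mem_read only: under this commit's
     assumption that atomics target memory-backed regions, this would be
     expected to fault rather than reach the device. */
  let lr_read = mem_read(0x0000000002000000, 8, false, false, true);

  /* An ordinary load to a (hypothetical) RAM address still ends up in
     phys_mem_read, unchanged by this commit. */
  let ram_read = mem_read(0x0000000080001000, 8, false, false, false);
  ()
}
```

The second case is exactly the assumption flagged by the FIXME comments: reserved and atomic accesses bypass the MMIO dispatch entirely, so a device address falls through to __RISCV_read and faults instead of touching device state.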