diff options
author | Anup Patel <anup@brainfault.org> | 2021-12-14 11:25:55 +0530 |
---|---|---|
committer | Anup Patel <anup@brainfault.org> | 2022-04-20 10:20:10 +0530 |
commit | 5a433081f4ce1a49ee83d1a81cf4922e7542a20c (patch) | |
tree | c99dfe8db908caf9319c905e5bba388b3206d43a /riscv/plic.cc | |
parent | d5b1a65c0e3a0b6b46eb66d5d0284bf3a6cc1e0c (diff) | |
download | spike-5a433081f4ce1a49ee83d1a81cf4922e7542a20c.zip spike-5a433081f4ce1a49ee83d1a81cf4922e7542a20c.tar.gz spike-5a433081f4ce1a49ee83d1a81cf4922e7542a20c.tar.bz2 |
Add PLIC emulation
We need an interrupt controller in Spike which will allow us to
emulate more real-world devices such as UART, VirtIO net, VirtIO
block, etc.
The RISC-V PLIC (or SiFive PLIC) is the commonly used interrupt
controller in existing RISC-V platforms so this patch adds PLIC
emulation for Spike.
Signed-off-by: Anup Patel <anup@brainfault.org>
Diffstat (limited to 'riscv/plic.cc')
-rw-r--r-- | riscv/plic.cc | 393 |
1 file changed, 393 insertions, 0 deletions
diff --git a/riscv/plic.cc b/riscv/plic.cc new file mode 100644 index 0000000..2d08c8e --- /dev/null +++ b/riscv/plic.cc @@ -0,0 +1,393 @@ +#include <sys/time.h> +#include "devices.h" +#include "processor.h" + +#define PLIC_MAX_CONTEXTS 15872 + +/* + * The PLIC consists of memory-mapped control registers, with a memory map + * as follows: + * + * base + 0x000000: Reserved (interrupt source 0 does not exist) + * base + 0x000004: Interrupt source 1 priority + * base + 0x000008: Interrupt source 2 priority + * ... + * base + 0x000FFC: Interrupt source 1023 priority + * base + 0x001000: Pending 0 + * base + 0x001FFF: Pending + * base + 0x002000: Enable bits for sources 0-31 on context 0 + * base + 0x002004: Enable bits for sources 32-63 on context 0 + * ... + * base + 0x0020FC: Enable bits for sources 992-1023 on context 0 + * base + 0x002080: Enable bits for sources 0-31 on context 1 + * ... + * base + 0x002100: Enable bits for sources 0-31 on context 2 + * ... + * base + 0x1F1F80: Enable bits for sources 992-1023 on context 15871 + * base + 0x1F1F84: Reserved + * ... (higher context IDs would fit here, but wouldn't fit + * inside the per-context priority vector) + * base + 0x1FFFFC: Reserved + * base + 0x200000: Priority threshold for context 0 + * base + 0x200004: Claim/complete for context 0 + * base + 0x200008: Reserved + * ... + * base + 0x200FFC: Reserved + * base + 0x201000: Priority threshold for context 1 + * base + 0x201004: Claim/complete for context 1 + * ... + * base + 0xFFE000: Priority threshold for context 15871 + * base + 0xFFE004: Claim/complete for context 15871 + * base + 0xFFE008: Reserved + * ... + * base + 0xFFFFFC: Reserved + */ + +/* Each interrupt source has a priority register associated with it. */ +#define PRIORITY_BASE 0 +#define PRIORITY_PER_ID 4 + +/* + * Each hart context has a vector of interupt enable bits associated with it. + * There's one bit for each interrupt source. 
+ */ +#define ENABLE_BASE 0x2000 +#define ENABLE_PER_HART 0x80 + +/* + * Each hart context has a set of control registers associated with it. Right + * now there's only two: a source priority threshold over which the hart will + * take an interrupt, and a register to claim interrupts. + */ +#define CONTEXT_BASE 0x200000 +#define CONTEXT_PER_HART 0x1000 +#define CONTEXT_THRESHOLD 0 +#define CONTEXT_CLAIM 4 + +#define REG_SIZE 0x1000000 + +plic_t::plic_t(std::vector<processor_t*>& procs, bool smode, uint32_t ndev) + : procs(procs), contexts(procs.size() * (smode ? 2 : 1)) +{ + size_t i; + size_t contexts_per_hart = smode ? 2 : 1; + plic_context_t *c; + + num_ids = ndev + 1; + num_ids_word = num_ids / 32; + if ((num_ids_word * 32) < num_ids) + num_ids_word++; + max_prio = (1UL << PLIC_PRIO_BITS) - 1; + memset(priority, 0, sizeof(priority)); + memset(level, 0, sizeof(level)); + + for (i = 0; i < contexts.size(); i++) { + c = &contexts[i]; + c->num = i; + c->proc = procs[i / contexts_per_hart]; + if (smode) { + c->mmode = (i % contexts_per_hart == 0); + } else { + c->mmode = true; + } + memset(&c->enable, 0, sizeof(c->enable)); + memset(&c->pending, 0, sizeof(c->pending)); + memset(&c->pending_priority, 0, sizeof(c->pending_priority)); + memset(&c->claimed, 0, sizeof(c->claimed)); + } +} + +uint32_t plic_t::context_best_pending(plic_context_t *c) +{ + uint8_t best_id_prio = 0; + uint32_t i, j, id, best_id = 0; + + for (i = 0; i < num_ids_word; i++) { + if (!c->pending[i]) { + continue; + } + + for (j = 0; j < 32; j++) { + id = i * 32 + j; + if ((num_ids <= id) || + !(c->pending[i] & (1 << j)) || + (c->claimed[i] & (1 << j))) { + continue; + } + + if (!best_id || + (best_id_prio < c->pending_priority[id])) { + best_id = id; + best_id_prio = c->pending_priority[id]; + } + } + } + + return best_id; +} + +void plic_t::context_update(plic_context_t *c) +{ + uint32_t best_id = context_best_pending(c); + reg_t mask = c->mmode ? 
MIP_MEIP : MIP_SEIP; + + c->proc->state.mip->backdoor_write_with_mask(mask, best_id ? mask : 0); +} + +uint32_t plic_t::context_claim(plic_context_t *c) +{ + uint32_t best_id = context_best_pending(c); + uint32_t best_id_word = best_id / 32; + uint32_t best_id_mask = (1 << (best_id % 32)); + + if (best_id) { + c->claimed[best_id_word] |= best_id_mask; + } + + context_update(c); + return best_id; +} + +bool plic_t::priority_read(reg_t offset, uint32_t *val) +{ + uint32_t id = (offset >> 2); + + if (id == 0 || id >= num_ids) { + return false; + } + + *val = priority[id]; + return true; +} + +bool plic_t::priority_write(reg_t offset, uint32_t val) +{ + uint32_t id = (offset >> 2); + + if (id == 0 || id >= num_ids) { + return false; + } + + val &= ((1 << PLIC_PRIO_BITS) - 1); + priority[id] = val; + return true; +} + +bool plic_t::context_enable_read(plic_context_t *c, + reg_t offset, uint32_t *val) +{ + uint32_t id_word = offset >> 2; + + if (num_ids_word < id_word) { + return false; + } + + *val = c->enable[id_word]; + return true; +} + +bool plic_t::context_enable_write(plic_context_t *c, + reg_t offset, uint32_t val) +{ + uint8_t id_prio; + uint32_t i, id, id_mask; + uint32_t id_word = offset >> 2; + uint32_t old_val, new_val, xor_val; + + if (num_ids_word < id_word) { + return false; + } + + old_val = c->enable[id_word]; + new_val = val; + + if (id_word == 0) { + new_val &= ~0x1; + } + + c->enable[id_word] = new_val; + + xor_val = old_val ^ new_val; + for (i = 0; i < 32; i++) { + id = id_word * 32 + i; + id_mask = 1 << i; + id_prio = priority[id]; + if (!(xor_val & id_mask)) { + continue; + } + if ((new_val & id_mask) && + (level[id_word] & id_mask)) { + c->pending[id_word] |= id_mask; + c->pending_priority[id] = id_prio; + } else if (!(new_val & id_mask)) { + c->pending[id_word] &= ~id_mask; + c->pending_priority[id] = 0; + c->claimed[id_word] &= ~id_mask; + } + } + + context_update(c); + return true; +} + +bool plic_t::context_read(plic_context_t *c, + reg_t 
offset, uint32_t *val) +{ + bool ret = true; + + switch (offset) { + case CONTEXT_THRESHOLD: + *val = c->priority_threshold; + break; + case CONTEXT_CLAIM: + *val = context_claim(c); + break; + default: + ret = false; + break; + }; + + return ret; +} + +bool plic_t::context_write(plic_context_t *c, + reg_t offset, uint32_t val) +{ + uint32_t id_word, id_mask; + bool ret = true, update = false; + + switch (offset) { + case CONTEXT_THRESHOLD: + val &= ((1 << PLIC_PRIO_BITS) - 1); + if (val <= max_prio) + c->priority_threshold = val; + else + update = true; + break; + case CONTEXT_CLAIM: + id_word = val / 32; + id_mask = 1 << (val % 32); + if ((val < num_ids) && + (c->enable[id_word] & id_mask)) { + c->claimed[id_word] &= ~id_mask; + update = true; + } + break; + default: + ret = false; + update = true; + break; + }; + + if (update) { + context_update(c); + } + + return ret; +} + +void plic_t::set_interrupt_level(uint32_t id, int lvl) +{ + uint8_t i, id_prio, id_word; + uint32_t id_mask; + plic_context_t *c = NULL; + + if (id <= 0 || num_ids <= id) { + return; + } + + id_prio = priority[id]; + id_word = id / 32; + id_mask = 1 << (id % 32); + + if (lvl) { + level[id_word] |= id_mask; + } else { + level[id_word] &= ~id_mask; + } + + /* + * Note: PLIC interrupts are level-triggered. As of now, + * there is no notion of edge-triggered interrupts. To + * handle this we auto-clear edge-triggered interrupts + * when PLIC context CLAIM register is read. 
+ */ + for (i = 0; i < contexts.size(); i++) { + c = &contexts[i]; + + if (c->enable[id_word] & id_mask) { + if (lvl) { + c->pending[id_word] |= id_mask; + c->pending_priority[id] = id_prio; + } else { + c->pending[id_word] &= ~id_mask; + c->pending_priority[id] = 0; + c->claimed[id_word] &= ~id_mask; + } + context_update(c); + break; + } + } +} + +bool plic_t::load(reg_t addr, size_t len, uint8_t* bytes) +{ + bool ret = false; + uint32_t cntx, val = 0; + + /* Only 32bit loads supported */ + if (len != 4) { + return false; + } + + if (PRIORITY_BASE <= addr && addr < ENABLE_BASE) { + ret = priority_read(addr, &val); + } else if (ENABLE_BASE <= addr && addr < CONTEXT_BASE) { + cntx = (addr - ENABLE_BASE) / ENABLE_PER_HART; + addr -= cntx * ENABLE_PER_HART + ENABLE_BASE; + if (cntx < contexts.size()) { + ret = context_enable_read(&contexts[cntx], addr, &val); + } + } else if (CONTEXT_BASE <= addr && addr < REG_SIZE) { + cntx = (addr - CONTEXT_BASE) / CONTEXT_PER_HART; + addr -= cntx * CONTEXT_PER_HART + CONTEXT_BASE; + if (cntx < contexts.size()) { + ret = context_read(&contexts[cntx], addr, &val); + } + } + + if (ret) { + memcpy(bytes, (uint8_t *)&val, len); + } + + return ret; +} + +bool plic_t::store(reg_t addr, size_t len, const uint8_t* bytes) +{ + bool ret = false; + uint32_t cntx, val; + + /* Only 32bit stores supported */ + if (len != 4) { + return false; + } + + memcpy((uint8_t *)&val, bytes, len); + + if (PRIORITY_BASE <= addr && addr < ENABLE_BASE) { + ret = priority_write(addr, val); + } else if (ENABLE_BASE <= addr && addr < CONTEXT_BASE) { + cntx = (addr - ENABLE_BASE) / ENABLE_PER_HART; + addr -= cntx * ENABLE_PER_HART + ENABLE_BASE; + if (cntx < contexts.size()) + ret = context_enable_write(&contexts[cntx], addr, val); + } else if (CONTEXT_BASE <= addr && addr < REG_SIZE) { + cntx = (addr - CONTEXT_BASE) / CONTEXT_PER_HART; + addr -= cntx * CONTEXT_PER_HART + CONTEXT_BASE; + if (cntx < contexts.size()) + ret = context_write(&contexts[cntx], addr, val); 
+ } + + return ret; +} |