author     Andrew Waterman <andrew@sifive.com>   2021-05-18 18:33:26 -0700
committer  Andrew Waterman <andrew@sifive.com>   2021-05-18 18:33:26 -0700
commit     09521e8550750172e7f137d1ed2845c95eefc0b6 (patch)
tree       46bce4a42fd15238769d3d200609645f5948780c
parent     e8e6b3aaee44d43b48164fbd377864c3a682dbd3 (diff)
Add 2-stage translation (for debugging purposes only)  (branch: remap)
-rw-r--r--  machine/minit.c   |  8
-rw-r--r--  machine/mtrap.c   |  2
-rw-r--r--  pk/entry.S        | 11
-rw-r--r--  pk/handlers.c     |  9
-rw-r--r--  pk/mmap.c         | 66
-rw-r--r--  pk/mmap.h         |  2
-rw-r--r--  pk/pk.c           |  3
7 files changed, 99 insertions(+), 2 deletions(-)
diff --git a/machine/minit.c b/machine/minit.c
index c519926..59df32b 100644
--- a/machine/minit.c
+++ b/machine/minit.c
@@ -66,8 +66,14 @@ static void delegate_traps()
write_csr(mideleg, interrupts);
write_csr(medeleg, exceptions);
- assert(read_csr(mideleg) == interrupts);
+ assert((~read_csr(mideleg) & interrupts) == 0);
assert(read_csr(medeleg) == exceptions);
+
+ uintptr_t hypervisor_exceptions =
+ (1U << CAUSE_FETCH_GUEST_PAGE_FAULT) |
+ (1U << CAUSE_LOAD_GUEST_PAGE_FAULT) |
+ (1U << CAUSE_STORE_GUEST_PAGE_FAULT);
+ set_csr(medeleg, hypervisor_exceptions);
}
static void fp_init()
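
Note on the minit.c hunk: the mideleg assertion becomes a subset check because, with the hypervisor extension present, the VS-level interrupt bits of mideleg (bits 2, 6 and 10) read as hard-wired ones, so the old equality test would fail even though everything pk asked for was in fact delegated. The three new medeleg bits hand the H-extension guest-page-fault exceptions to pk's own trap handler instead of leaving them in machine mode. For reference, the privileged spec assigns these cause codes (pk's encoding.h is assumed to define matching CAUSE_* macros):

    /* Guest-page-fault cause codes from the RISC-V privileged spec;
     * assumed to match the CAUSE_* macros used in the hunk above. */
    #define CAUSE_FETCH_GUEST_PAGE_FAULT 20   /* 0x14 */
    #define CAUSE_LOAD_GUEST_PAGE_FAULT  21   /* 0x15 */
    #define CAUSE_STORE_GUEST_PAGE_FAULT 23   /* 0x17 */
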
diff --git a/machine/mtrap.c b/machine/mtrap.c
index dcff050..5d45f37 100644
--- a/machine/mtrap.c
+++ b/machine/mtrap.c
@@ -183,7 +183,7 @@ void redirect_trap(uintptr_t epc, uintptr_t mstatus, uintptr_t badaddr)
write_csr(scause, read_csr(mcause));
write_csr(mepc, read_csr(stvec));
- uintptr_t new_mstatus = mstatus & ~(MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE);
+ uintptr_t new_mstatus = mstatus & ~(MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE | MSTATUS_MPV);
uintptr_t mpp_s = MSTATUS_MPP & (MSTATUS_MPP >> 1);
new_mstatus |= (mstatus * (MSTATUS_SPIE / MSTATUS_SIE)) & MSTATUS_SPIE;
new_mstatus |= (mstatus / (mpp_s / MSTATUS_SPP)) & MSTATUS_SPP;
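
Note on the mtrap.c hunk: adding MSTATUS_MPV to the cleared bits makes the subsequent mret land in HS-mode (V=0) rather than back in a virtualized VS/VU mode, which is what redirect_trap wants when bouncing a trap to the S-mode stvec handler. The surrounding multiply/divide lines are constant-shift tricks; a worked check, assuming the standard mstatus field positions (MSTATUS_SIE = 1<<1, MSTATUS_SPIE = 1<<5, MSTATUS_SPP = 1<<8, so mpp_s = 1<<11):

    /* The multiply copies SIE (bit 1) into SPIE (bit 5); the divide copies
     * the "MPP == S" bit (bit 11) into SPP (bit 8).  Sketch, not pk code. */
    _Static_assert((1 << 5) / (1 << 1) == 16, "SIE -> SPIE is a left shift by 4");
    _Static_assert((1 << 11) / (1 << 8) == 8, "MPP(S) -> SPP is a right shift by 3");
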
diff --git a/pk/entry.S b/pk/entry.S
index 48db0d9..8a4cdd8 100644
--- a/pk/entry.S
+++ b/pk/entry.S
@@ -76,6 +76,17 @@ start_user:
csrw sstatus, t0
csrw sepc, t1
+ # If hypervisor present, use VU-mode in place of U-mode
+ lw t2, have_hypervisor
+ beqz t2, 1f
+ li t1, HSTATUS_SPV
+ andi t0, t0, SSTATUS_SPP
+ beqz t0, 2f
+ li t1, 0
+2:
+ csrw CSR_HSTATUS, t1
+1:
+
# restore x registers
LOAD x1,1*REGBYTES(a0)
LOAD x2,2*REGBYTES(a0)
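
Note on the entry.S hunk: hstatus.SPV selects whether the coming sret raises V=1. User code should now run in VU-mode, but a return with sstatus.SPP set (i.e. back into S-mode) must keep SPV clear. A hypothetical C rendering of the same decision (not part of the commit, which does it directly in assembly and writes the CSR by number via CSR_HSTATUS, presumably so the file still assembles on toolchains that lack the hstatus name):

    static inline void choose_virtualization_on_sret(uintptr_t sstatus)
    {
      if (!have_hypervisor)
        return;
      /* SPP clear -> returning to user code: run it in VU-mode (SPV set).
         SPP set   -> returning to pk in S-mode: stay non-virtualized.    */
      uintptr_t hs = (sstatus & SSTATUS_SPP) ? 0 : HSTATUS_SPV;
      asm volatile ("csrw hstatus, %0" : : "r" (hs));
    }
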
diff --git a/pk/handlers.c b/pk/handlers.c
index 4b4abaf..0a7fd23 100644
--- a/pk/handlers.c
+++ b/pk/handlers.c
@@ -78,6 +78,12 @@ static void handle_fault_store(trapframe_t* tf)
segfault(tf, tf->badvaddr, "store");
}
+static void handle_guest_fault(trapframe_t* tf)
+{
+ if (handle_guest_page_fault(tf->badvaddr) != 0)
+ panic("Unexpected guest fault @ %p!\n", tf->badvaddr);
+}
+
static void handle_syscall(trapframe_t* tf)
{
tf->gpr[10] = do_syscall(tf->gpr[10], tf->gpr[11], tf->gpr[12], tf->gpr[13],
@@ -110,6 +116,9 @@ void handle_trap(trapframe_t* tf)
[CAUSE_MISALIGNED_STORE] = handle_misaligned_store,
[CAUSE_LOAD_PAGE_FAULT] = handle_fault_load,
[CAUSE_STORE_PAGE_FAULT] = handle_fault_store,
+ [CAUSE_FETCH_GUEST_PAGE_FAULT] = handle_guest_fault,
+ [CAUSE_LOAD_GUEST_PAGE_FAULT] = handle_guest_fault,
+ [CAUSE_STORE_GUEST_PAGE_FAULT] = handle_guest_fault,
};
kassert(tf->cause < ARRAY_SIZE(trap_handlers) && trap_handlers[tf->cause]);
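
Note on the handlers.c hunk: these entries only fire once machine/minit.c delegates the guest-page-fault causes to S-mode; tf->badvaddr is the stval value saved on trap entry, which for a guest page fault holds the guest virtual address of the access. Because designated initializers size an array from the largest index used, adding cause 23 grows trap_handlers automatically, and any cause left out stays NULL and trips the kassert. A minimal illustration of that sizing rule (sketch, not pk code):

    static void (*demo_handlers[])(void) = { [23] = 0 };
    _Static_assert(sizeof(demo_handlers) / sizeof(demo_handlers[0]) == 24,
                   "a designated initializer sets the array length");
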
diff --git a/pk/mmap.c b/pk/mmap.c
index 37aa87a..9fca358 100644
--- a/pk/mmap.c
+++ b/pk/mmap.c
@@ -25,6 +25,7 @@ typedef struct vmr_t {
static vmr_t* vmr_freelist_head;
static pte_t* root_page_table;
+static pte_t* g_root_page_table;
#define RISCV_PGLEVELS ((VA_BITS - RISCV_PGSHIFT) / RISCV_PGLEVEL_BITS)
@@ -36,6 +37,7 @@ static size_t free_pages;
static size_t pages_promised;
int demand_paging = 1; // unless -p flag is given
+int have_hypervisor;
uint64_t randomize_mapping; // set by --randomize-mapping
typedef struct freelist_node_t {
@@ -293,6 +295,8 @@ int __valid_user_range(uintptr_t vaddr, size_t len)
static void flush_tlb_entry(uintptr_t vaddr)
{
+ if (have_hypervisor)
+ asm (".insn r 0x73, 0x0, 0x11, x0, %0, x0" : : "r" (vaddr)); // hfence.vvma vaddr
asm volatile ("sfence.vma %0" : : "r" (vaddr) : "memory");
}
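
Note on the fence: hfence.vvma is emitted through .insn so the file still builds with toolchains that lack the H-extension mnemonics. The encodings used in this file are R-type under the SYSTEM opcode (0x73, funct3 0): hfence.vvma has funct7 0x11 with rs1 = guest virtual address and rs2 = ASID; hfence.gvma (used further down) has funct7 0x31 with rs1 = guest physical address >> 2 and rs2 = VMID; an x0 register operand widens the fence to all addresses or all ASIDs/VMIDs. A sketch of named wrappers for the two forms used here:

    /* Sketch only: same encodings as the .insn lines in this file. */
    static inline void hfence_vvma_gva(uintptr_t gva)
    {
      asm volatile (".insn r 0x73, 0x0, 0x11, x0, %0, x0" : : "r" (gva) : "memory");
    }
    static inline void hfence_gvma_gpa(uintptr_t gpa)
    {
      /* rs1 carries the guest physical address shifted right by 2. */
      asm volatile (".insn r 0x73, 0x0, 0x31, x0, %0, x0" : : "r" (gpa >> 2) : "memory");
    }
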
@@ -558,6 +562,65 @@ static void init_early_alloc()
free_pages = (mem_size - (first_free_page - MEM_START)) / RISCV_PGSIZE;
}
+static bool __make_host_mapping(uintptr_t paddr)
+{
+ pte_t* hpte = __walk_internal(g_root_page_table, paddr, 1, 0);
+ kassert(hpte);
+
+ if (!*hpte) {
+ *hpte = pte_create(paddr >> RISCV_PGSHIFT, prot_to_type(PROT_READ | PROT_WRITE | PROT_EXEC, 1));
+ asm (".insn r 0x73, 0x0, 0x31, x0, %0, x0" : : "r" (paddr / 4)); // hfence.gvma paddr, x0
+ return true;
+ }
+ return false;
+}
+
+static int __handle_guest_page_fault(uintptr_t vaddr)
+{
+ for (int level = RISCV_PGLEVELS - 1; level >= 0; level--) {
+ pte_t* vpte = __walk_internal(root_page_table, vaddr, 0, level);
+ if (!vpte)
+ return -1;
+
+ if (__make_host_mapping(kva2pa(vpte)))
+ return 0;
+
+ if (level == 0 && (*vpte & PTE_V)) {
+ if (__make_host_mapping(pte_ppn(*vpte) << RISCV_PGSHIFT))
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+int handle_guest_page_fault(uintptr_t addr)
+{
+ spinlock_lock(&vm_lock);
+ int ret = __handle_guest_page_fault(addr);
+ spinlock_unlock(&vm_lock);
+ return ret;
+}
+
+static void hypervisor_vm_init()
+{
+ have_hypervisor = sizeof(uintptr_t) == 8 && ((read_csr(misa) >> ('H' - 'A')) & 1);
+ if (!have_hypervisor)
+ return;
+
+ size_t root_pages = 4;
+ g_root_page_table = (uintptr_t*)__early_pgalloc_align(root_pages, root_pages);
+ memset(g_root_page_table, 0, root_pages * RISCV_PGSIZE);
+
+ asm ("csrw %0, %1" : : "I" (CSR_HEDELEG), "r" (0));
+ asm ("csrw %0, %1" : : "I" (CSR_HIDELEG), "r" (0));
+ asm ("csrw %0, %1" : : "I" (CSR_VSSTATUS), "r" (SSTATUS_FS));
+ asm ("csrw %0, %1" : : "I" (CSR_HSTATUS), "r" (HSTATUS_SPV));
+ asm ("csrw %0, %1" : : "I" (CSR_HGATP), "r" (SATP_MODE_CHOICE | ((uintptr_t)g_root_page_table >> RISCV_PGSHIFT)));
+ asm ("csrw %0, %1" : : "I" (CSR_VSATP), "r" (SATP_MODE_CHOICE | ((uintptr_t)root_page_table >> RISCV_PGSHIFT)));
+ asm (".insn r 0x73, 0x0, 0x31, x0, x0, x0"); // hfence.gvma x0, x0
+}
+
uintptr_t pk_vm_init()
{
init_early_alloc();
@@ -571,12 +634,15 @@ uintptr_t pk_vm_init()
flush_tlb();
write_csr(satp, ((uintptr_t)root_page_table >> RISCV_PGSHIFT) | SATP_MODE_CHOICE);
+ hypervisor_vm_init();
+
uintptr_t kernel_stack_top = __page_alloc_assert() + RISCV_PGSIZE;
// relocate
kva2pa_offset = KVA_START - MEM_START;
page_freelist_storage = (void*)pa2kva(page_freelist_storage);
root_page_table = (void*)pa2kva(root_page_table);
+ g_root_page_table = (void*)pa2kva(g_root_page_table);
return kernel_stack_top;
}
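
Note on the mmap.c additions as a whole: hypervisor_vm_init() probes misa for the H extension (RV64 only), allocates a 16 KiB, 16 KiB-aligned G-stage root table, points vsatp at pk's existing page table so the payload's VS-stage view of memory is unchanged, and leaves the G-stage table empty. Every G-stage miss then arrives as a guest page fault, and __handle_guest_page_fault() identity-maps whichever physical page the hardware walk actually needed: one of the VS-stage page-table pages, or the leaf data page itself. The leaf is created with the user bit set because the G-stage treats every access as a user-level access. The two address-space roots compose as below (same expressions as in the hunk, shown here only for orientation):

    /* VS-stage: guest-virtual -> guest-physical, reusing pk's own table.   */
    uintptr_t vsatp_value = SATP_MODE_CHOICE | ((uintptr_t)root_page_table   >> RISCV_PGSHIFT);
    /* G-stage:  guest-physical -> host-physical, identity map built lazily.
     * Its root spans 4 contiguous pages because the G-stage translates two
     * extra guest-physical address bits (the "Sv39x4"/"Sv48x4" formats).    */
    uintptr_t hgatp_value = SATP_MODE_CHOICE | ((uintptr_t)g_root_page_table >> RISCV_PGSHIFT);

Like root_page_table, g_root_page_table is rewritten to its kernel virtual alias at the end of pk_vm_init, so later walks keep working once kva2pa_offset is in effect.
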
diff --git a/pk/mmap.h b/pk/mmap.h
index 949b9a7..1ea2623 100644
--- a/pk/mmap.h
+++ b/pk/mmap.h
@@ -22,10 +22,12 @@
#define MREMAP_FIXED 0x2
extern int demand_paging;
+extern int have_hypervisor;
extern uint64_t randomize_mapping;
uintptr_t pk_vm_init();
int handle_page_fault(uintptr_t vaddr, int prot);
+int handle_guest_page_fault(uintptr_t vaddr);
void populate_mapping(const void* start, size_t size, int prot);
int __valid_user_range(uintptr_t vaddr, size_t len);
uintptr_t __do_mmap(uintptr_t addr, size_t length, int prot, int flags, file_t* file, off_t offset);
diff --git a/pk/pk.c b/pk/pk.c
index b8c9337..30e7d71 100644
--- a/pk/pk.c
+++ b/pk/pk.c
@@ -197,6 +197,9 @@ void rest_of_boot_loader_2(uintptr_t kstack_top)
if (!argc)
panic("tell me what ELF to load!");
+ if (!randomize_mapping)
+ have_hypervisor = 0;
+
// load program named by argv[0]
static long phdrs[128]; // avoid large stack allocation
current.phdr = (uintptr_t)phdrs;