Diffstat (limited to 'pk/vm.c')
 pk/vm.c | 85
 1 file changed, 40 insertions(+), 45 deletions(-)
diff --git a/pk/vm.c b/pk/vm.c
index c54417f..fd40fcb 100644
--- a/pk/vm.c
+++ b/pk/vm.c
@@ -16,7 +16,7 @@ typedef struct {
#define MAX_VMR 32
spinlock_t vm_lock = SPINLOCK_INIT;
-static vmr_t vmrs[MAX_VMR];
+static vmr_t vmrs[MAX_VMR] __attribute__((aligned(PTE_TYPE+1)));
typedef uintptr_t pte_t;
static pte_t* root_page_table;
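
(An aside on the new alignment: PTEs whose type field is PTE_TYPE_INVALID double as vmr_t pointers in the lazy-mapping code further down, so a pointer into vmrs[] must keep the entire type field clear rather than just the old PTE_V bit. A minimal sketch of the trick follows; the constant values and the cut-down vmr_t layout are illustrative assumptions, the real definitions live in the riscv encoding headers and at the top of pk/vm.c.)

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t pte_t;

    #define PTE_TYPE          0x7   /* assumed: type field occupies the low PTE bits   */
    #define PTE_TYPE_INVALID  0x0   /* all type bits clear => nothing mapped here      */
    #define PTE_TYPE_TABLE    0x1   /* assumed nonzero encoding for a next-level table */
    #define PTE_PPN_SHIFT     10    /* assumed: physical page number starts at bit 10  */

    /* sizeof(vmr_t) is a multiple of PTE_TYPE+1 here, so aligning the array
     * keeps every element's address clear in the type field. */
    typedef struct { uintptr_t addr; size_t length; unsigned refcnt; int prot; } vmr_t;
    static vmr_t vmrs[4] __attribute__((aligned(PTE_TYPE + 1)));

    int main(void)
    {
      pte_t lazy = (pte_t)&vmrs[1];                   /* a vmr_t* stashed in a PTE slot */
      assert((lazy & PTE_TYPE) == PTE_TYPE_INVALID);  /* still classified as unmapped   */

      pte_t ptd = ((uintptr_t)0x1234 << PTE_PPN_SHIFT) | PTE_TYPE_TABLE;
      assert((ptd & PTE_TYPE) == PTE_TYPE_TABLE);     /* a real table pointer is not    */
      return 0;
    }
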
@@ -64,12 +64,17 @@ static void __vmr_decref(vmr_t* v, unsigned dec)
static size_t pte_ppn(pte_t pte)
{
- return pte >> RISCV_PGSHIFT;
+ return pte >> PTE_PPN_SHIFT;
}
static pte_t ptd_create(uintptr_t ppn)
{
- return ppn << RISCV_PGSHIFT | PTE_T | PTE_V;
+ return (ppn << PTE_PPN_SHIFT) | PTE_TYPE_TABLE;
+}
+
+static inline pte_t pte_create(uintptr_t ppn, int kprot, int uprot)
+{
+ return PTE_CREATE(ppn, uprot, kprot);
}
static uintptr_t ppn(uintptr_t addr)
@@ -83,19 +88,6 @@ static size_t pt_idx(uintptr_t addr, int level)
return idx & ((1 << RISCV_PGLEVEL_BITS) - 1);
}
-static pte_t super_pte_create(uintptr_t ppn, int kprot, int uprot, int level)
-{
- kprot &= (PROT_READ | PROT_WRITE | PROT_EXEC);
- uprot &= (PROT_READ | PROT_WRITE | PROT_EXEC);
- int perm = (kprot * PTE_SR) | (uprot * PTE_UR) | PTE_V;
- return (ppn << (RISCV_PGLEVEL_BITS*level + RISCV_PGSHIFT)) | perm;
-}
-
-static pte_t pte_create(uintptr_t ppn, int kprot, int uprot)
-{
- return super_pte_create(ppn, kprot, uprot, 0);
-}
-
static void __maybe_create_root_page_table()
{
if (root_page_table)
@@ -104,6 +96,7 @@ static void __maybe_create_root_page_table()
if (have_vm)
write_csr(sptbr, root_page_table);
}
+
static pte_t* __walk_internal(uintptr_t addr, int create)
{
const size_t pte_per_page = RISCV_PGSIZE/sizeof(void*);
@@ -113,7 +106,7 @@ static pte_t* __walk_internal(uintptr_t addr, int create)
for (unsigned i = RISCV_PGLEVELS-1; i > 0; i--)
{
size_t idx = pt_idx(addr, i);
- if (!(t[idx] & PTE_V))
+ if ((t[idx] & PTE_TYPE) == PTE_TYPE_INVALID)
{
if (!create)
return 0;
@@ -121,7 +114,7 @@ static pte_t* __walk_internal(uintptr_t addr, int create)
t[idx] = ptd_create(ppn(page));
}
else
- kassert(t[idx] & PTE_T);
+ kassert((t[idx] & PTE_TYPE) == PTE_TYPE_TABLE);
t = (pte_t*)(pte_ppn(t[idx]) << RISCV_PGSHIFT);
}
return &t[pt_idx(addr, 0)];
@@ -146,15 +139,16 @@ static int __va_avail(uintptr_t vaddr)
static uintptr_t __vm_alloc(size_t npage)
{
uintptr_t start = current.brk, end = current.mmap_max - npage*RISCV_PGSIZE;
- for (uintptr_t a = end; a >= start; a -= RISCV_PGSIZE)
+ for (uintptr_t a = start; a <= end; a += RISCV_PGSIZE)
{
if (!__va_avail(a))
continue;
- uintptr_t last = a, first = a - (npage-1) * RISCV_PGSIZE;
- for (a = first; a < last && __va_avail(a); a += RISCV_PGSIZE)
+ uintptr_t first = a, last = a + (npage-1) * RISCV_PGSIZE;
+ for (a = last; a > first && __va_avail(a); a -= RISCV_PGSIZE)
;
- if (a >= last)
- return a;
+ if (a > first)
+ continue;
+ return a;
}
return 0;
}
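
The rewritten __vm_alloc now tries candidates bottom-up from the brk instead of top-down from mmap_max, and probes each candidate run from its high end. A standalone sketch of the same loop structure over a toy page map (the availability array stands in for __va_avail(); the map itself is made up):

    #include <stdbool.h>
    #include <stdio.h>

    #define NPAGES 16
    static bool avail[NPAGES] = {
      0,0,1,0, 1,1,0,1, 1,1,1,1, 1,1,0,0   /* 1 = page is free */
    };

    /* Return the lowest index of a run of n free pages, or -1 if none fits. */
    static int vm_alloc_sketch(int n)
    {
      for (int a = 0; a + n <= NPAGES; a++) {
        if (!avail[a])
          continue;
        int first = a, last = a + n - 1;
        for (a = last; a > first && avail[a]; a--)
          ;
        if (a > first)
          continue;   /* hit a used page; the outer loop resumes past it */
        return a;     /* a == first: the whole run is free */
      }
      return -1;
    }

    int main(void)
    {
      printf("%d\n", vm_alloc_sketch(4));   /* prints 7 with the map above */
      return 0;
    }
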
@@ -180,7 +174,7 @@ static int __handle_page_fault(uintptr_t vaddr, int prot)
if (pte == 0 || *pte == 0)
return -1;
- else if (!(*pte & PTE_V))
+ else if ((*pte & PTE_TYPE) == PTE_TYPE_INVALID)
{
kassert(__valid_user_range(vaddr, 1));
uintptr_t ppn = vpn;
@@ -226,7 +220,7 @@ static void __do_munmap(uintptr_t addr, size_t len)
if (pte == 0 || *pte == 0)
continue;
- if (!(*pte & PTE_V))
+ if ((*pte & PTE_TYPE) == PTE_TYPE_INVALID)
__vmr_decref((vmr_t*)*pte, 1);
*pte = 0;
@@ -377,7 +371,7 @@ uintptr_t do_mprotect(uintptr_t addr, size_t length, int prot)
break;
}
- if(!(*pte & PTE_V)){
+ if ((*pte & PTE_TYPE) == PTE_TYPE_INVALID) {
vmr_t* v = (vmr_t*)*pte;
if((v->prot ^ prot) & ~v->prot){
//TODO:look at file to find perms
@@ -385,15 +379,15 @@ uintptr_t do_mprotect(uintptr_t addr, size_t length, int prot)
break;
}
v->prot = prot;
- }else{
- pte_t perms = pte_create(0, 0, prot);
- if ((*pte & perms) != perms){
+ } else {
+ if (((prot & PROT_READ) && !PTE_UR(*pte))
+ || ((prot & PROT_WRITE) && !PTE_UW(*pte))
+ || ((prot & PROT_EXEC) && !PTE_UX(*pte))) {
//TODO:look at file to find perms
res = -EACCES;
break;
}
- pte_t permset = (*pte & ~(PTE_UR | PTE_UW | PTE_UX)) | perms;
- *pte = permset;
+ *pte = pte_create(pte_ppn(*pte), prot, prot);
}
}
spinlock_unlock(&vm_lock);
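
The do_mprotect hunk above drops the old "build a permission mask and compare" logic in favour of asking the PTE directly for each requested user permission. A sketch of just that check; PTE_UR/PTE_UW/PTE_UX come from the headers, so the single-bit stand-ins below are assumptions:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uintptr_t pte_t;

    #define SK_UR  0x08             /* assumed user-read bit  */
    #define SK_UW  0x10             /* assumed user-write bit */
    #define SK_UX  0x20             /* assumed user-exec bit  */
    #define PTE_UR(p) (((p) & SK_UR) != 0)
    #define PTE_UW(p) (((p) & SK_UW) != 0)
    #define PTE_UX(p) (((p) & SK_UX) != 0)

    #define PROT_READ  1
    #define PROT_WRITE 2
    #define PROT_EXEC  4

    /* True when the mapping already grants everything in prot; mprotect may
     * narrow permissions but must not silently widen them. */
    static bool prot_ok(pte_t pte, int prot)
    {
      return !(((prot & PROT_READ)  && !PTE_UR(pte))
            || ((prot & PROT_WRITE) && !PTE_UW(pte))
            || ((prot & PROT_EXEC)  && !PTE_UX(pte)));
    }

    int main(void)
    {
      pte_t readonly = SK_UR;
      assert(prot_ok(readonly, PROT_READ));
      assert(!prot_ok(readonly, PROT_READ | PROT_WRITE));   /* would return -EACCES */
      return 0;
    }
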
@@ -404,12 +398,11 @@ uintptr_t do_mprotect(uintptr_t addr, size_t length, int prot)
void __map_kernel_range(uintptr_t vaddr, uintptr_t paddr, size_t len, int prot)
{
uintptr_t n = ROUNDUP(len, RISCV_PGSIZE) / RISCV_PGSIZE;
- pte_t perms = pte_create(0, prot, 0);
for (uintptr_t a = vaddr, i = 0; i < n; i++, a += RISCV_PGSIZE)
{
pte_t* pte = __walk_create(a);
kassert(pte);
- *pte = (a - vaddr + paddr) | perms;
+ *pte = pte_create((a - vaddr + paddr) >> RISCV_PGSHIFT, prot, 0);
}
}
@@ -452,22 +445,25 @@ void supervisor_vm_init()
memset(sbi_pt, 0, RISCV_PGSIZE);
pte_t* middle_pt = (void*)sbi_pt + RISCV_PGSIZE;
#if RISCV_PGLEVELS == 2
+ size_t num_middle_pts = 1;
root_page_table = middle_pt;
#elif RISCV_PGLEVELS == 3
- kassert(current.first_user_vaddr >= -(SUPERPAGE_SIZE << RISCV_PGLEVEL_BITS));
- root_page_table = (void*)middle_pt + RISCV_PGSIZE;
- memset(root_page_table, 0, RISCV_PGSIZE);
- root_page_table[(1<<RISCV_PGLEVEL_BITS)-1] = (uintptr_t)middle_pt | PTE_T | PTE_V;
+ size_t num_middle_pts = (-current.first_user_vaddr - 1) / MEGAPAGE_SIZE + 1;
+ root_page_table = (void*)middle_pt + num_middle_pts * RISCV_PGSIZE;
+ for (size_t i = 0; i < num_middle_pts; i++)
+ root_page_table[(1<<RISCV_PGLEVEL_BITS)-num_middle_pts+i] = ptd_create(((uintptr_t)middle_pt >> RISCV_PGSHIFT) + i);
#else
#error
#endif
+ memset(middle_pt, 0, root_page_table - middle_pt + RISCV_PGSIZE);
write_csr(sptbr, root_page_table);
for (uintptr_t vaddr = current.first_user_vaddr, paddr = vaddr + current.bias, end = current.first_vaddr_after_user;
paddr < mem_size; vaddr += SUPERPAGE_SIZE, paddr += SUPERPAGE_SIZE) {
int l2_shift = RISCV_PGLEVEL_BITS + RISCV_PGSHIFT;
- int l2_idx = (vaddr >> l2_shift) & ((1 << RISCV_PGLEVEL_BITS)-1);
- middle_pt[l2_idx] = paddr | PTE_V | PTE_G | PTE_SR | PTE_SW | PTE_SX;
+ size_t l2_idx = (current.first_user_vaddr >> l2_shift) & ((1 << RISCV_PGLEVEL_BITS)-1);
+ l2_idx += ((vaddr - current.first_user_vaddr) >> l2_shift);
+ middle_pt[l2_idx] = pte_create(paddr >> RISCV_PGSHIFT, PROT_READ|PROT_WRITE|PROT_EXEC, 0);
}
current.first_vaddr_after_user += (void*)root_page_table + RISCV_PGSIZE - (void*)sbi_pt;
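
For the 3-level case above, num_middle_pts is the number of megapage-granularity page tables needed to cover the user region sitting at the very top of the virtual address space: -first_user_vaddr is the distance from the load address up to the wrap-around point, and (x - 1) / MEGAPAGE_SIZE + 1 is simply ceil(x / MEGAPAGE_SIZE). A sketch of just that arithmetic, with an assumed megapage size and a made-up link address:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    int main(void)
    {
      const uintptr_t MEGAPAGE_SIZE = (uintptr_t)1 << 22;       /* assumed 4 MiB slices   */
      uintptr_t first_user_vaddr = -(10 * MEGAPAGE_SIZE + 123); /* toy high-end placement */

      uintptr_t x = -first_user_vaddr;                     /* bytes up to the top of VA space */
      size_t num_middle_pts = (x - 1) / MEGAPAGE_SIZE + 1; /* ceiling division */

      assert(num_middle_pts == 11);   /* 10 full slices plus one partial slice */
      return 0;
    }
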
@@ -475,11 +471,11 @@ void supervisor_vm_init()
uintptr_t num_sbi_pages = sbi_top_paddr() / RISCV_PGSIZE;
for (uintptr_t i = 0; i < num_sbi_pages; i++) {
uintptr_t idx = (1 << RISCV_PGLEVEL_BITS) - num_sbi_pages + i;
- sbi_pt[idx] = (i * RISCV_PGSIZE) | PTE_V | PTE_G | PTE_SR | PTE_SX;
+ sbi_pt[idx] = pte_create(i, PROT_READ|PROT_EXEC, 0);
}
- pte_t* sbi_pte = middle_pt + ((1 << RISCV_PGLEVEL_BITS)-1);
+ pte_t* sbi_pte = middle_pt + ((num_middle_pts << RISCV_PGLEVEL_BITS)-1);
kassert(!*sbi_pte);
- *sbi_pte = (uintptr_t)sbi_pt | PTE_T | PTE_V;
+ *sbi_pte = ptd_create((uintptr_t)sbi_pt >> RISCV_PGSHIFT);
// disable our allocator
kassert(next_free_page == 0);
@@ -498,8 +494,7 @@ void pk_vm_init()
write_csr(sscratch, __page_alloc() + RISCV_PGSIZE);
size_t stack_size = RISCV_PGSIZE * CLAMP(mem_size/(RISCV_PGSIZE*32), 1, 256);
- current.stack_bottom = __do_mmap(0, stack_size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
- kassert(current.stack_bottom != (uintptr_t)-1);
+ current.stack_bottom = __do_mmap(current.mmap_max - stack_size, stack_size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, 0, 0);
current.stack_top = current.stack_bottom + stack_size;
- kassert(current.stack_top == current.mmap_max);
+ kassert(current.stack_bottom != (uintptr_t)-1);
}
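
Finally, because __vm_alloc now hands out low addresses first, pk_vm_init can no longer rely on the first anonymous mmap landing against mmap_max, so the stack is pinned there explicitly with MAP_FIXED. A small sketch of that placement arithmetic (all the numbers are made up):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
      uintptr_t pgsize     = 0x1000;
      uintptr_t mmap_max   = 0x80000000;        /* assumed top of the user mmap region */
      uintptr_t stack_size = 64 * pgsize;

      uintptr_t stack_bottom = mmap_max - stack_size;   /* address passed with MAP_FIXED */
      uintptr_t stack_top    = stack_bottom + stack_size;

      assert(stack_top == mmap_max);            /* the old kassert now holds by construction */
      assert(stack_bottom % pgsize == 0);
      return 0;
    }
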