-rw-r--r--  pk/atomic.h   |   4
-rw-r--r--  pk/console.c  |   2
-rw-r--r--  pk/encoding.h | 221
-rw-r--r--  pk/entry.S    | 135
-rw-r--r--  pk/fp.c       | 177
-rw-r--r--  pk/fp.h       |  16
-rw-r--r--  pk/fp_asm.S   | 154
-rw-r--r--  pk/handlers.c |  36
-rw-r--r--  pk/init.c     |   9
-rw-r--r--  pk/pk.S       |  22
-rw-r--r--  pk/pk.h       |   6
-rw-r--r--  pk/pk.ld      |   2
-rw-r--r--  pk/syscall.c  |   1
-rw-r--r--  pk/vm.c       |  13
14 files changed, 470 insertions, 328 deletions
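
Several of the C hunks below (pk/atomic.h, pk/handlers.c, pk/vm.c) only change which CSR name and bit mask are passed to the read_csr/set_csr/clear_csr helpers; the helpers themselves are defined in pk/encoding.h outside the hunks shown here. A minimal sketch of the csrr/csrrs/csrrc definitions they are assumed to expand to:

/* Sketch only -- the real definitions live in pk/encoding.h and are not
 * part of this diff.  Each macro stringizes the CSR name into the
 * instruction, and the read-modify variants return the old CSR value. */
#define read_csr(reg) ({ unsigned long __tmp;                       \
  asm volatile ("csrr %0, " #reg : "=r"(__tmp));                    \
  __tmp; })

#define write_csr(reg, val)                                         \
  asm volatile ("csrw " #reg ", %0" :: "r"(val))

#define set_csr(reg, bit) ({ unsigned long __tmp;                   \
  asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "r"(bit)); \
  __tmp; })

#define clear_csr(reg, bit) ({ unsigned long __tmp;                 \
  asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "r"(bit)); \
  __tmp; })

Because csrrc/csrrs return the previous CSR value, clear_csr(mstatus, MSTATUS_IE) in spinlock_lock_irqsave both disables interrupts and records whether they had been enabled, and spinlock_unlock_irqrestore re-enables them only if the saved flags had MSTATUS_IE set.
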
diff --git a/pk/atomic.h b/pk/atomic.h index fea1330..24db8be 100644 --- a/pk/atomic.h +++ b/pk/atomic.h @@ -74,7 +74,7 @@ static inline void spinlock_unlock(spinlock_t* lock) static inline long spinlock_lock_irqsave(spinlock_t* lock) { - long flags = clear_csr(status, SR_EI); + long flags = clear_csr(mstatus, MSTATUS_IE); spinlock_lock(lock); return flags; } @@ -82,7 +82,7 @@ static inline long spinlock_lock_irqsave(spinlock_t* lock) static inline void spinlock_unlock_irqrestore(spinlock_t* lock, long flags) { spinlock_unlock(lock); - set_csr(status, flags & SR_EI); + set_csr(mstatus, flags & MSTATUS_IE); } #endif diff --git a/pk/console.c b/pk/console.c index 4828d3f..366c313 100644 --- a/pk/console.c +++ b/pk/console.c @@ -126,7 +126,7 @@ void dump_tf(trapframe_t* tf) for(int j = 0; j < 4; j++) printk("%s %lx%c",regnames[i+j],tf->gpr[i+j],j < 3 ? ' ' : '\n'); } - printk("sr %lx pc %lx va %lx insn %x\n",tf->sr,tf->epc,tf->badvaddr, + printk("pc %lx va %lx insn %x\n", tf->epc, tf->badvaddr, (uint32_t)tf->insn); } diff --git a/pk/encoding.h b/pk/encoding.h index 089a8a9..d20ee5b 100644 --- a/pk/encoding.h +++ b/pk/encoding.h @@ -3,25 +3,50 @@ #ifndef RISCV_CSR_ENCODING_H #define RISCV_CSR_ENCODING_H -#define SR_S 0x00000001 -#define SR_PS 0x00000002 -#define SR_EI 0x00000004 -#define SR_PEI 0x00000008 -#define SR_EF 0x00000010 -#define SR_U64 0x00000020 -#define SR_S64 0x00000040 -#define SR_VM 0x00000080 -#define SR_EA 0x00000100 -#define SR_IM 0x00FF0000 -#define SR_IP 0xFF000000 -#define SR_ZERO ~(SR_S|SR_PS|SR_EI|SR_PEI|SR_EF|SR_U64|SR_S64|SR_VM|SR_EA|SR_IM|SR_IP) -#define SR_IM_SHIFT 16 -#define SR_IP_SHIFT 24 +#define MSTATUS_SSIP 0x00000002 +#define MSTATUS_HSIP 0x00000004 +#define MSTATUS_MSIP 0x00000008 +#define MSTATUS_IE 0x00000010 +#define MSTATUS_PRV 0x00000060 +#define MSTATUS_IE1 0x00000080 +#define MSTATUS_PRV1 0x00000300 +#define MSTATUS_IE2 0x00000400 +#define MSTATUS_PRV2 0x00001800 +#define MSTATUS_IE3 0x00002000 +#define MSTATUS_PRV3 0x0000C000 +#define MSTATUS_IE4 0x00010000 +#define MSTATUS_PRV4 0x00060000 +#define MSTATUS_VM 0x00780000 +#define MSTATUS_STIE 0x01000000 +#define MSTATUS_HTIE 0x02000000 +#define MSTATUS_MTIE 0x04000000 +#define MSTATUS_FS 0x18000000 +#define MSTATUS_XS 0x60000000 +#define MSTATUS32_SD 0x80000000 +#define MSTATUS64_UA 0x0000000F00000000 +#define MSTATUS64_SA 0x000000F000000000 +#define MSTATUS64_HA 0x00000F0000000000 +#define MSTATUS64_SD 0x8000000000000000 -#define IRQ_COP 2 -#define IRQ_IPI 5 -#define IRQ_HOST 6 -#define IRQ_TIMER 7 +#define PRV_U 0 +#define PRV_S 1 +#define PRV_H 2 +#define PRV_M 3 + +#define VM_MBARE 0 +#define VM_MBB 1 +#define VM_MBBID 2 +#define VM_SV32 4 +#define VM_SV43 5 + +#define UA_RV32 0 +#define UA_RV64 4 +#define UA_RV128 8 + +#define IRQ_TIMER 0 +#define IRQ_IPI 1 +#define IRQ_HOST 2 +#define IRQ_COP 3 #define IMPL_SPIKE 1 #define IMPL_ROCKET 2 @@ -41,9 +66,14 @@ #ifdef __riscv #ifdef __riscv64 +# define MSTATUS_UA MSTATUS64_UA +# define MSTATUS_SA MSTATUS64_SA +# define MSTATUS_HA MSTATUS64_HA +# define MSTATUS_SD MSTATUS64_SD # define RISCV_PGLEVELS 3 # define RISCV_PGSHIFT 13 #else +# define MSTATUS_SD MSTATUS32_SD # define RISCV_PGLEVELS 2 # define RISCV_PGSHIFT 12 #endif @@ -113,6 +143,8 @@ #define MASK_FSGNJN_D 0xfe00707f #define MATCH_FMIN_S 0x28000053 #define MASK_FMIN_S 0xfe00707f +#define MATCH_MRET 0x30200073 +#define MASK_MRET 0xffffffff #define MATCH_CSRRW 0x1073 #define MASK_CSRRW 0x707f #define MATCH_SLLIW 0x101b @@ -201,6 +233,8 @@ #define MASK_SCALL 0xffffffff #define MATCH_FCLASS_S 
0xe0001053 #define MASK_FCLASS_S 0xfff0707f +#define MATCH_SFENCE_VM 0x10400073 +#define MASK_SFENCE_VM 0xfff07fff #define MATCH_SC_W 0x1800202f #define MASK_SC_W 0xf800707f #define MATCH_REM 0x2006033 @@ -283,7 +317,7 @@ #define MASK_ANDI 0x707f #define MATCH_FMV_X_S 0xe0000053 #define MASK_FMV_X_S 0xfff0707f -#define MATCH_SRET 0x80000073 +#define MATCH_SRET 0x10200073 #define MASK_SRET 0xffffffff #define MATCH_FNMADD_S 0x4f #define MASK_FNMADD_S 0x600007f @@ -373,6 +407,8 @@ #define MASK_FMADD_S 0x600007f #define MATCH_FSQRT_S 0x58000053 #define MASK_FSQRT_S 0xfff0007f +#define MATCH_MSENTER 0x30900073 +#define MASK_MSENTER 0xffffffff #define MATCH_AMOMIN_W 0x8000202f #define MASK_AMOMIN_W 0xf800707f #define MATCH_FSGNJN_S 0x20001053 @@ -414,29 +450,10 @@ #define CSR_FFLAGS 0x1 #define CSR_FRM 0x2 #define CSR_FCSR 0x3 -#define CSR_STATS 0xc0 -#define CSR_SUP0 0x500 -#define CSR_SUP1 0x501 -#define CSR_EPC 0x502 -#define CSR_BADVADDR 0x503 -#define CSR_PTBR 0x504 -#define CSR_ASID 0x505 -#define CSR_COUNT 0x506 -#define CSR_COMPARE 0x507 -#define CSR_EVEC 0x508 -#define CSR_CAUSE 0x509 -#define CSR_STATUS 0x50a -#define CSR_HARTID 0x50b -#define CSR_IMPL 0x50c -#define CSR_FATC 0x50d -#define CSR_SEND_IPI 0x50e -#define CSR_CLEAR_IPI 0x50f -#define CSR_RESET 0x51d -#define CSR_TOHOST 0x51e -#define CSR_FROMHOST 0x51f #define CSR_CYCLE 0xc00 #define CSR_TIME 0xc01 #define CSR_INSTRET 0xc02 +#define CSR_STATS 0xc0 #define CSR_UARCH0 0xcc0 #define CSR_UARCH1 0xcc1 #define CSR_UARCH2 0xcc2 @@ -453,22 +470,41 @@ #define CSR_UARCH13 0xccd #define CSR_UARCH14 0xcce #define CSR_UARCH15 0xccf -#define CSR_COUNTH 0x586 +#define CSR_SSTATUS 0x100 +#define CSR_STVEC 0x101 +#define CSR_SCOMPARE 0x121 +#define CSR_SSCRATCH 0x140 +#define CSR_SEPC 0x141 +#define CSR_SPTBR 0x188 +#define CSR_SASID 0x189 +#define CSR_COUNT 0x900 +#define CSR_STIME 0x901 +#define CSR_SINSTRET 0x902 +#define CSR_SCAUSE 0xd40 +#define CSR_SBADADDR 0xd41 +#define CSR_TOHOST 0x580 +#define CSR_FROMHOST 0x581 +#define CSR_MSTATUS 0x300 +#define CSR_MSCRATCH 0x340 +#define CSR_MEPC 0x341 +#define CSR_MCAUSE 0xf40 +#define CSR_MBADADDR 0xf41 +#define CSR_RESET 0x780 #define CSR_CYCLEH 0xc80 #define CSR_TIMEH 0xc81 #define CSR_INSTRETH 0xc82 +#define CSR_COUNTH 0x980 +#define CSR_STIMEH 0x981 +#define CSR_SINSTRETH 0x982 #define CAUSE_MISALIGNED_FETCH 0x0 #define CAUSE_FAULT_FETCH 0x1 -#define CAUSE_ILLEGAL_INSTRUCTION 0x2 -#define CAUSE_PRIVILEGED_INSTRUCTION 0x3 -#define CAUSE_FP_DISABLED 0x4 +#define CAUSE_ILLEGAL_INSTRUCTION 0x4 #define CAUSE_SYSCALL 0x6 #define CAUSE_BREAKPOINT 0x7 #define CAUSE_MISALIGNED_LOAD 0x8 -#define CAUSE_MISALIGNED_STORE 0x9 -#define CAUSE_FAULT_LOAD 0xa +#define CAUSE_FAULT_LOAD 0x9 +#define CAUSE_MISALIGNED_STORE 0xa #define CAUSE_FAULT_STORE 0xb -#define CAUSE_ACCELERATOR_DISABLED 0xc #endif #ifdef DECLARE_INSN DECLARE_INSN(fmv_s_x, MATCH_FMV_S_X, MASK_FMV_S_X) @@ -479,6 +515,7 @@ DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D) DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU) DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D) DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S) +DECLARE_INSN(mret, MATCH_MRET, MASK_MRET) DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW) DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW) DECLARE_INSN(lb, MATCH_LB, MASK_LB) @@ -523,6 +560,7 @@ DECLARE_INSN(sub, MATCH_SUB, MASK_SUB) DECLARE_INSN(blt, MATCH_BLT, MASK_BLT) DECLARE_INSN(scall, MATCH_SCALL, MASK_SCALL) DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S) +DECLARE_INSN(sfence_vm, MATCH_SFENCE_VM, MASK_SFENCE_VM) 
DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W) DECLARE_INSN(rem, MATCH_REM, MASK_REM) DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW) @@ -609,6 +647,7 @@ DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI) DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D) DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S) DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S) +DECLARE_INSN(msenter, MATCH_MSENTER, MASK_MSENTER) DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W) DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S) DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D) @@ -633,29 +672,10 @@ DECLARE_INSN(sd, MATCH_SD, MASK_SD) DECLARE_CSR(fflags, CSR_FFLAGS) DECLARE_CSR(frm, CSR_FRM) DECLARE_CSR(fcsr, CSR_FCSR) -DECLARE_CSR(stats, CSR_STATS) -DECLARE_CSR(sup0, CSR_SUP0) -DECLARE_CSR(sup1, CSR_SUP1) -DECLARE_CSR(epc, CSR_EPC) -DECLARE_CSR(badvaddr, CSR_BADVADDR) -DECLARE_CSR(ptbr, CSR_PTBR) -DECLARE_CSR(asid, CSR_ASID) -DECLARE_CSR(count, CSR_COUNT) -DECLARE_CSR(compare, CSR_COMPARE) -DECLARE_CSR(evec, CSR_EVEC) -DECLARE_CSR(cause, CSR_CAUSE) -DECLARE_CSR(status, CSR_STATUS) -DECLARE_CSR(hartid, CSR_HARTID) -DECLARE_CSR(impl, CSR_IMPL) -DECLARE_CSR(fatc, CSR_FATC) -DECLARE_CSR(send_ipi, CSR_SEND_IPI) -DECLARE_CSR(clear_ipi, CSR_CLEAR_IPI) -DECLARE_CSR(reset, CSR_RESET) -DECLARE_CSR(tohost, CSR_TOHOST) -DECLARE_CSR(fromhost, CSR_FROMHOST) DECLARE_CSR(cycle, CSR_CYCLE) DECLARE_CSR(time, CSR_TIME) DECLARE_CSR(instret, CSR_INSTRET) +DECLARE_CSR(stats, CSR_STATS) DECLARE_CSR(uarch0, CSR_UARCH0) DECLARE_CSR(uarch1, CSR_UARCH1) DECLARE_CSR(uarch2, CSR_UARCH2) @@ -672,38 +692,41 @@ DECLARE_CSR(uarch12, CSR_UARCH12) DECLARE_CSR(uarch13, CSR_UARCH13) DECLARE_CSR(uarch14, CSR_UARCH14) DECLARE_CSR(uarch15, CSR_UARCH15) -DECLARE_CSR(counth, CSR_COUNTH) +DECLARE_CSR(sstatus, CSR_SSTATUS) +DECLARE_CSR(stvec, CSR_STVEC) +DECLARE_CSR(scompare, CSR_SCOMPARE) +DECLARE_CSR(sscratch, CSR_SSCRATCH) +DECLARE_CSR(sepc, CSR_SEPC) +DECLARE_CSR(sptbr, CSR_SPTBR) +DECLARE_CSR(sasid, CSR_SASID) +DECLARE_CSR(count, CSR_COUNT) +DECLARE_CSR(stime, CSR_STIME) +DECLARE_CSR(sinstret, CSR_SINSTRET) +DECLARE_CSR(scause, CSR_SCAUSE) +DECLARE_CSR(sbadaddr, CSR_SBADADDR) +DECLARE_CSR(tohost, CSR_TOHOST) +DECLARE_CSR(fromhost, CSR_FROMHOST) +DECLARE_CSR(mstatus, CSR_MSTATUS) +DECLARE_CSR(mscratch, CSR_MSCRATCH) +DECLARE_CSR(mepc, CSR_MEPC) +DECLARE_CSR(mcause, CSR_MCAUSE) +DECLARE_CSR(mbadaddr, CSR_MBADADDR) +DECLARE_CSR(reset, CSR_RESET) DECLARE_CSR(cycleh, CSR_CYCLEH) DECLARE_CSR(timeh, CSR_TIMEH) DECLARE_CSR(instreth, CSR_INSTRETH) +DECLARE_CSR(counth, CSR_COUNTH) +DECLARE_CSR(stimeh, CSR_STIMEH) +DECLARE_CSR(sinstreth, CSR_SINSTRETH) #endif #ifdef DECLARE_CAUSE DECLARE_CAUSE("fflags", CAUSE_FFLAGS) DECLARE_CAUSE("frm", CAUSE_FRM) DECLARE_CAUSE("fcsr", CAUSE_FCSR) -DECLARE_CAUSE("stats", CAUSE_STATS) -DECLARE_CAUSE("sup0", CAUSE_SUP0) -DECLARE_CAUSE("sup1", CAUSE_SUP1) -DECLARE_CAUSE("epc", CAUSE_EPC) -DECLARE_CAUSE("badvaddr", CAUSE_BADVADDR) -DECLARE_CAUSE("ptbr", CAUSE_PTBR) -DECLARE_CAUSE("asid", CAUSE_ASID) -DECLARE_CAUSE("count", CAUSE_COUNT) -DECLARE_CAUSE("compare", CAUSE_COMPARE) -DECLARE_CAUSE("evec", CAUSE_EVEC) -DECLARE_CAUSE("cause", CAUSE_CAUSE) -DECLARE_CAUSE("status", CAUSE_STATUS) -DECLARE_CAUSE("hartid", CAUSE_HARTID) -DECLARE_CAUSE("impl", CAUSE_IMPL) -DECLARE_CAUSE("fatc", CAUSE_FATC) -DECLARE_CAUSE("send_ipi", CAUSE_SEND_IPI) -DECLARE_CAUSE("clear_ipi", CAUSE_CLEAR_IPI) -DECLARE_CAUSE("reset", CAUSE_RESET) -DECLARE_CAUSE("tohost", CAUSE_TOHOST) -DECLARE_CAUSE("fromhost", CAUSE_FROMHOST) DECLARE_CAUSE("cycle", CAUSE_CYCLE) 
DECLARE_CAUSE("time", CAUSE_TIME) DECLARE_CAUSE("instret", CAUSE_INSTRET) +DECLARE_CAUSE("stats", CAUSE_STATS) DECLARE_CAUSE("uarch0", CAUSE_UARCH0) DECLARE_CAUSE("uarch1", CAUSE_UARCH1) DECLARE_CAUSE("uarch2", CAUSE_UARCH2) @@ -720,8 +743,30 @@ DECLARE_CAUSE("uarch12", CAUSE_UARCH12) DECLARE_CAUSE("uarch13", CAUSE_UARCH13) DECLARE_CAUSE("uarch14", CAUSE_UARCH14) DECLARE_CAUSE("uarch15", CAUSE_UARCH15) -DECLARE_CAUSE("counth", CAUSE_COUNTH) +DECLARE_CAUSE("sstatus", CAUSE_SSTATUS) +DECLARE_CAUSE("stvec", CAUSE_STVEC) +DECLARE_CAUSE("scompare", CAUSE_SCOMPARE) +DECLARE_CAUSE("sscratch", CAUSE_SSCRATCH) +DECLARE_CAUSE("sepc", CAUSE_SEPC) +DECLARE_CAUSE("sptbr", CAUSE_SPTBR) +DECLARE_CAUSE("sasid", CAUSE_SASID) +DECLARE_CAUSE("count", CAUSE_COUNT) +DECLARE_CAUSE("stime", CAUSE_STIME) +DECLARE_CAUSE("sinstret", CAUSE_SINSTRET) +DECLARE_CAUSE("scause", CAUSE_SCAUSE) +DECLARE_CAUSE("sbadaddr", CAUSE_SBADADDR) +DECLARE_CAUSE("tohost", CAUSE_TOHOST) +DECLARE_CAUSE("fromhost", CAUSE_FROMHOST) +DECLARE_CAUSE("mstatus", CAUSE_MSTATUS) +DECLARE_CAUSE("mscratch", CAUSE_MSCRATCH) +DECLARE_CAUSE("mepc", CAUSE_MEPC) +DECLARE_CAUSE("mcause", CAUSE_MCAUSE) +DECLARE_CAUSE("mbadaddr", CAUSE_MBADADDR) +DECLARE_CAUSE("reset", CAUSE_RESET) DECLARE_CAUSE("cycleh", CAUSE_CYCLEH) DECLARE_CAUSE("timeh", CAUSE_TIMEH) DECLARE_CAUSE("instreth", CAUSE_INSTRETH) +DECLARE_CAUSE("counth", CAUSE_COUNTH) +DECLARE_CAUSE("stimeh", CAUSE_STIMEH) +DECLARE_CAUSE("sinstreth", CAUSE_SINSTRETH) #endif @@ -14,6 +14,7 @@ .macro save_tf # save gprs + STORE x1,1*REGBYTES(x2) STORE x3,3*REGBYTES(x2) STORE x4,4*REGBYTES(x2) STORE x5,5*REGBYTES(x2) @@ -45,16 +46,15 @@ STORE x31,31*REGBYTES(x2) # get sr, epc, badvaddr, cause - csrr x3,sup0 # x1 - csrr x4,sup1 # x2 - csrr x5,status - csrr x6,epc - csrr x8,cause - STORE x3,1*REGBYTES(x2) - STORE x4,2*REGBYTES(x2) - STORE x5,32*REGBYTES(x2) - STORE x6,33*REGBYTES(x2) - STORE x8,35*REGBYTES(x2) + addi t0,sp,320 + csrrw t0,mscratch,t0 + csrr t1,mstatus + csrr t2,mepc + csrr t3,mcause + STORE t0,2*REGBYTES(x2) + STORE t1,32*REGBYTES(x2) + STORE t2,33*REGBYTES(x2) + STORE t3,35*REGBYTES(x2) la gp, _gp @@ -64,71 +64,68 @@ 1: .endm - .text - .globl pop_tf -pop_tf: # write the trap frame onto the stack - # restore gprs - LOAD a1,32*REGBYTES(a0) - LOAD a2,1*REGBYTES(a0) - LOAD a3,2*REGBYTES(a0) - - csrw status, a1 # restore sr (disable interrupts) - csrw sup0, a2 - csrw sup1, a3 - - move x1,a0 - LOAD x3,3*REGBYTES(x1) - LOAD x4,4*REGBYTES(x1) - LOAD x5,5*REGBYTES(x1) - LOAD x6,6*REGBYTES(x1) - LOAD x7,7*REGBYTES(x1) - LOAD x8,8*REGBYTES(x1) - LOAD x9,9*REGBYTES(x1) - LOAD x10,10*REGBYTES(x1) - LOAD x11,11*REGBYTES(x1) - LOAD x12,12*REGBYTES(x1) - LOAD x13,13*REGBYTES(x1) - LOAD x14,14*REGBYTES(x1) - LOAD x15,15*REGBYTES(x1) - LOAD x16,16*REGBYTES(x1) - LOAD x17,17*REGBYTES(x1) - LOAD x18,18*REGBYTES(x1) - LOAD x19,19*REGBYTES(x1) - LOAD x20,20*REGBYTES(x1) - LOAD x21,21*REGBYTES(x1) - LOAD x22,22*REGBYTES(x1) - LOAD x23,23*REGBYTES(x1) - LOAD x24,24*REGBYTES(x1) - LOAD x25,25*REGBYTES(x1) - LOAD x26,26*REGBYTES(x1) - LOAD x27,27*REGBYTES(x1) - LOAD x28,28*REGBYTES(x1) - LOAD x29,29*REGBYTES(x1) - LOAD x30,30*REGBYTES(x1) - LOAD x31,31*REGBYTES(x1) - - # gtfo! 
- LOAD x2,33*REGBYTES(x1) - csrr x1, sup0 - csrw epc, x2 - csrr x2, sup1 - sret - + .section .text.init,"ax",@progbits .global trap_entry trap_entry: - csrw sup0, x1 - csrw sup1, sp + # entry point for reset + j _start - # when coming from kernel, continue below its stack - csrr x1, status - and x1, x1, SR_PS - add sp, sp, -320 - bnez x1, 1f - la sp, stack_top-320 + # entry point when coming from machine mode + j 1f -1:save_tf + # entry point when coming from other modes + csrrw sp, mscratch, sp + +1:addi sp,sp,-320 + save_tf move a0,sp j handle_trap + + .globl pop_tf +pop_tf: # write the trap frame onto the stack + # restore sr (disable interrupts) and epc + LOAD a1,32*REGBYTES(a0) + LOAD a2,33*REGBYTES(a0) + csrw mstatus, a1 + csrw mepc, a2 + + # restore x registers + LOAD x1,1*REGBYTES(a0) + LOAD x2,2*REGBYTES(a0) + LOAD x3,3*REGBYTES(a0) + LOAD x4,4*REGBYTES(a0) + LOAD x5,5*REGBYTES(a0) + LOAD x6,6*REGBYTES(a0) + LOAD x7,7*REGBYTES(a0) + LOAD x8,8*REGBYTES(a0) + LOAD x9,9*REGBYTES(a0) + LOAD x11,11*REGBYTES(a0) + LOAD x12,12*REGBYTES(a0) + LOAD x13,13*REGBYTES(a0) + LOAD x14,14*REGBYTES(a0) + LOAD x15,15*REGBYTES(a0) + LOAD x16,16*REGBYTES(a0) + LOAD x17,17*REGBYTES(a0) + LOAD x18,18*REGBYTES(a0) + LOAD x19,19*REGBYTES(a0) + LOAD x20,20*REGBYTES(a0) + LOAD x21,21*REGBYTES(a0) + LOAD x22,22*REGBYTES(a0) + LOAD x23,23*REGBYTES(a0) + LOAD x24,24*REGBYTES(a0) + LOAD x25,25*REGBYTES(a0) + LOAD x26,26*REGBYTES(a0) + LOAD x27,27*REGBYTES(a0) + LOAD x28,28*REGBYTES(a0) + LOAD x29,29*REGBYTES(a0) + LOAD x30,30*REGBYTES(a0) + LOAD x31,31*REGBYTES(a0) + # restore a0 last + LOAD x10,10*REGBYTES(a0) + + # gtfo + mret + .bss .align 4 @@ -4,8 +4,6 @@ #include "fp.h" #include "config.h" -static fp_state_t fp_state; - #ifdef PK_ENABLE_FP_EMULATION #include "softfloat.h" @@ -13,23 +11,60 @@ static fp_state_t fp_state; #define noisy 0 -static void set_fp_reg(unsigned int which, unsigned int dp, uint64_t val); -static uint64_t get_fp_reg(unsigned int which, unsigned int dp); - static inline void validate_address(trapframe_t* tf, long addr, int size, int store) { } +#ifdef __riscv_hard_float +# define get_fcsr() ({ fcsr_t fcsr; asm ("frcsr %0" : "=r"(fcsr)); fcsr; }) +# define put_fcsr(value) ({ asm ("fscsr %0" :: "r"(value)); }) +# define get_f32_reg(i) ({ \ + register int value asm("a0"); \ + register long offset asm("a1") = (i) * 8; \ + asm ("1: auipc %0, %%pcrel_hi(get_f32_reg); add %0, %0, %1; jalr %0, %%pcrel_lo(1b)" : "=&r"(value) : "r"(offset)); \ + value; }) +# define put_f32_reg(i, value) ({ \ + long tmp; \ + register long __value asm("a0") = (value); \ + register long offset asm("a1") = (i) * 8; \ + asm ("1: auipc %0, %%pcrel_hi(put_f32_reg); add %0, %0, %1; jalr %0, %%pcrel_lo(1b)" : "=&r"(tmp) : "r"(offset), "r"(__value)); }) +# ifdef __riscv64 +# define get_f64_reg(i) ({ \ + register long value asm("a0"); \ + register long offset asm("a1") = (i) * 8; \ + asm ("1: auipc %0, %%pcrel_hi(get_f64_reg); add %0, %0, %1; jalr %0, %%pcrel_lo(1b)" : "=&r"(value) : "r"(offset)); \ + value; }) +# define put_f64_reg(i, value) ({ \ + long tmp; \ + register long __value asm("a0") = (value); \ + register long offset asm("a1") = (i) * 8; \ + asm ("1: auipc %0, %%pcrel_hi(put_f64_reg); add %0, %0, %1; jalr %0, %%pcrel_lo(1b)" : "=&r"(tmp) : "r"(offset), "r"(__value)); }) +# else +# define get_f64_reg(i) ({ \ + long long value; \ + register long long* valuep asm("a0") = &value; \ + register long offset asm("a1") = (i) * 8; \ + asm ("1: auipc %0, %%pcrel_hi(get_f64_reg); add %0, %0, %1; jalr %0, %%pcrel_lo(1b)" 
: "=&r"(valuep) : "r"(offset)); \ + value; }) +# define put_f64_reg(i, value) ({ \ + long long __value = (value); \ + register long long* valuep asm("a0") = &__value; \ + register long offset asm("a1") = (i) * 8; \ + asm ("1: auipc %0, %%pcrel_hi(put_f64_reg); add %0, %0, %1; jalr %0, %%pcrel_lo(1b)" : "=&r"(tmp) : "r"(offset), "r"(__value)); }) +# endif +#else +static fp_state_t fp_state; +# define get_fcsr() fp_state.fcsr +# define put_fcsr(value) fp_state.fcsr = (value) +# define get_f32_reg(i) fp_state.fpr[i] +# define get_f64_reg(i) fp_state.fpr[i] +# define put_f32_reg(i, value) fp_state.fpr[i] = (value) +# define put_f64_reg(i, value) fp_state.fpr[i] = (value) +#endif + int emulate_fp(trapframe_t* tf) { - if (have_fp) - { - if (!(read_csr(status) & SR_EF)) - init_fp(tf); - fp_state.fsr.bits = get_fp_state(fp_state.fpr); - } - if(noisy) printk("FPU emulation at pc %lx, insn %x\n",tf->epc,(uint32_t)tf->insn); @@ -46,27 +81,25 @@ int emulate_fp(trapframe_t* tf) #define XRS2 (tf->gpr[RS2]) #define XRDR (tf->gpr[RD]) - uint64_t frs1d = fp_state.fpr[RS1]; - uint64_t frs2d = fp_state.fpr[RS2]; - uint64_t frs3d = fp_state.fpr[RS3]; - uint32_t frs1s = get_fp_reg(RS1, 0); - uint32_t frs2s = get_fp_reg(RS2, 0); - uint32_t frs3s = get_fp_reg(RS3, 0); + #define frs1d get_f64_reg(RS1) + #define frs2d get_f64_reg(RS2) + #define frs3d get_f64_reg(RS3) + #define frs1s get_f32_reg(RS1) + #define frs2s get_f32_reg(RS2) + #define frs3s get_f32_reg(RS3) long effective_address_load = XRS1 + imm; long effective_address_store = XRS1 + bimm; - softfloat_exceptionFlags = fp_state.fsr.fsr.flags; - softfloat_roundingMode = (RM == 7) ? fp_state.fsr.fsr.rm : RM; + fcsr_t fcsr = get_fcsr(); + softfloat_exceptionFlags = fcsr.fcsr.flags; + softfloat_roundingMode = (RM == 7) ? 
fcsr.fcsr.rm : RM; #define IS_INSN(x) ((tf->insn & MASK_ ## x) == MATCH_ ## x) - int do_writeback = 0; - int writeback_dp; - uint64_t writeback_value; - #define DO_WRITEBACK(dp, value) \ - do { do_writeback = 1; writeback_dp = (dp); writeback_value = (value); } \ - while(0) + #define DO_WRITEBACK(dp, value) ({ \ + if (dp) put_f64_reg(RD, value); \ + else put_f32_reg(RD, value); }) #define DO_CSR(which, op) ({ long tmp = which; which op; tmp; }) @@ -198,34 +231,28 @@ int emulate_fp(trapframe_t* tf) XRDR = f32_classify(frs1s); else if(IS_INSN(FCLASS_D)) XRDR = f64_classify(frs1s); - else if(IS_INSN(CSRRS) && imm == CSR_FCSR) XRDR = DO_CSR(fp_state.fsr.bits, |= XRS1); - else if(IS_INSN(CSRRS) && imm == CSR_FRM) XRDR = DO_CSR(fp_state.fsr.fsr.rm, |= XRS1); - else if(IS_INSN(CSRRS) && imm == CSR_FFLAGS) XRDR = DO_CSR(fp_state.fsr.fsr.flags, |= XRS1); - else if(IS_INSN(CSRRSI) && imm == CSR_FCSR) XRDR = DO_CSR(fp_state.fsr.bits, |= RS1); - else if(IS_INSN(CSRRSI) && imm == CSR_FRM) XRDR = DO_CSR(fp_state.fsr.fsr.rm, |= RS1); - else if(IS_INSN(CSRRSI) && imm == CSR_FFLAGS) XRDR = DO_CSR(fp_state.fsr.fsr.flags, |= RS1); - else if(IS_INSN(CSRRC) && imm == CSR_FCSR) XRDR = DO_CSR(fp_state.fsr.bits, &= ~XRS1); - else if(IS_INSN(CSRRC) && imm == CSR_FRM) XRDR = DO_CSR(fp_state.fsr.fsr.rm, &= ~XRS1); - else if(IS_INSN(CSRRC) && imm == CSR_FFLAGS) XRDR = DO_CSR(fp_state.fsr.fsr.flags, &= ~XRS1); - else if(IS_INSN(CSRRCI) && imm == CSR_FCSR) XRDR = DO_CSR(fp_state.fsr.bits, &= ~RS1); - else if(IS_INSN(CSRRCI) && imm == CSR_FRM) XRDR = DO_CSR(fp_state.fsr.fsr.rm, &= ~RS1); - else if(IS_INSN(CSRRCI) && imm == CSR_FFLAGS) XRDR = DO_CSR(fp_state.fsr.fsr.flags, &= ~RS1); - else if(IS_INSN(CSRRW) && imm == CSR_FCSR) XRDR = DO_CSR(fp_state.fsr.bits, = XRS1); - else if(IS_INSN(CSRRW) && imm == CSR_FRM) XRDR = DO_CSR(fp_state.fsr.fsr.rm, = XRS1); - else if(IS_INSN(CSRRW) && imm == CSR_FFLAGS) XRDR = DO_CSR(fp_state.fsr.fsr.flags, = XRS1); - else if(IS_INSN(CSRRWI) && imm == CSR_FCSR) XRDR = DO_CSR(fp_state.fsr.bits, = RS1); - else if(IS_INSN(CSRRWI) && imm == CSR_FRM) XRDR = DO_CSR(fp_state.fsr.fsr.rm, = RS1); - else if(IS_INSN(CSRRWI) && imm == CSR_FFLAGS) XRDR = DO_CSR(fp_state.fsr.fsr.flags, = RS1); + else if(IS_INSN(CSRRS) && imm == CSR_FCSR) XRDR = DO_CSR(fcsr.bits, |= XRS1); + else if(IS_INSN(CSRRS) && imm == CSR_FRM) XRDR = DO_CSR(fcsr.fcsr.rm, |= XRS1); + else if(IS_INSN(CSRRS) && imm == CSR_FFLAGS) XRDR = DO_CSR(fcsr.fcsr.flags, |= XRS1); + else if(IS_INSN(CSRRSI) && imm == CSR_FCSR) XRDR = DO_CSR(fcsr.bits, |= RS1); + else if(IS_INSN(CSRRSI) && imm == CSR_FRM) XRDR = DO_CSR(fcsr.fcsr.rm, |= RS1); + else if(IS_INSN(CSRRSI) && imm == CSR_FFLAGS) XRDR = DO_CSR(fcsr.fcsr.flags, |= RS1); + else if(IS_INSN(CSRRC) && imm == CSR_FCSR) XRDR = DO_CSR(fcsr.bits, &= ~XRS1); + else if(IS_INSN(CSRRC) && imm == CSR_FRM) XRDR = DO_CSR(fcsr.fcsr.rm, &= ~XRS1); + else if(IS_INSN(CSRRC) && imm == CSR_FFLAGS) XRDR = DO_CSR(fcsr.fcsr.flags, &= ~XRS1); + else if(IS_INSN(CSRRCI) && imm == CSR_FCSR) XRDR = DO_CSR(fcsr.bits, &= ~RS1); + else if(IS_INSN(CSRRCI) && imm == CSR_FRM) XRDR = DO_CSR(fcsr.fcsr.rm, &= ~RS1); + else if(IS_INSN(CSRRCI) && imm == CSR_FFLAGS) XRDR = DO_CSR(fcsr.fcsr.flags, &= ~RS1); + else if(IS_INSN(CSRRW) && imm == CSR_FCSR) XRDR = DO_CSR(fcsr.bits, = XRS1); + else if(IS_INSN(CSRRW) && imm == CSR_FRM) XRDR = DO_CSR(fcsr.fcsr.rm, = XRS1); + else if(IS_INSN(CSRRW) && imm == CSR_FFLAGS) XRDR = DO_CSR(fcsr.fcsr.flags, = XRS1); + else if(IS_INSN(CSRRWI) && imm == CSR_FCSR) XRDR = DO_CSR(fcsr.bits, = RS1); + 
else if(IS_INSN(CSRRWI) && imm == CSR_FRM) XRDR = DO_CSR(fcsr.fcsr.rm, = RS1); + else if(IS_INSN(CSRRWI) && imm == CSR_FFLAGS) XRDR = DO_CSR(fcsr.fcsr.flags, = RS1); else return -1; - fp_state.fsr.fsr.flags = softfloat_exceptionFlags; - - if(do_writeback) - set_fp_reg(RD, writeback_dp, writeback_value); - - if(have_fp) - put_fp_state(fp_state.fpr, fp_state.fsr.bits); + put_fcsr(fcsr); return 0; } @@ -238,51 +265,11 @@ int emulate_fp(trapframe_t* tf) #define LOAD_FP_REG(which, type, val) asm("fl" STR(type) " f" STR(which) ",%0" : : "m"(val)) #define STORE_FP_REG(which, type, val) asm("fs" STR(type) " f" STR(which) ",%0" : "=m"(val) : : "memory") -static void __attribute__((noinline)) -set_fp_reg(unsigned int which, unsigned int dp, uint64_t val) -{ - if (noisy) - printk("fpr%c[%x] <= %lx\n", dp ? 'd' : 's', which, val); - - if(dp || !have_fp) - fp_state.fpr[which] = val; - else - { - // to set an SP value, move the SP value into the FPU - // then move it back out as a DP value. OK to clobber $f0 - // because we'll restore it later. - PUT_FP_REG(0,s,val); - STORE_FP_REG(0,d,fp_state.fpr[which]); - } -} - -static uint64_t __attribute__((noinline)) -get_fp_reg(unsigned int which, unsigned int dp) -{ - uint64_t val; - if(dp || !have_fp) - val = fp_state.fpr[which]; - else - { - // to get an SP value, move the DP value into the FPU - // then move it back out as an SP value. OK to clobber $f0 - // because we'll restore it later. - LOAD_FP_REG(0,d,fp_state.fpr[which]); - GET_FP_REG(0,s,val); - } - - if (noisy) - printk("fpr%c[%x] => %lx\n", dp ? 'd' : 's', which, val); - - return val; -} - #endif -void init_fp(trapframe_t* tf) +void fp_init() { - tf->sr |= SR_EF; - set_csr(status, SR_EF); - - put_fp_state(fp_state.fpr, fp_state.fsr.bits); + if (read_csr(mstatus) & MSTATUS_FS) + for (int i = 0; i < 32; i++) + put_f64_reg(i, 0); } @@ -3,16 +3,18 @@ #ifndef _FP_H #define _FP_H +typedef union { + struct { + uint8_t flags : 5; + uint8_t rm : 3; + } fcsr; + uint8_t bits; +} fcsr_t; + typedef struct { uint64_t fpr[32]; - union { - struct { - uint8_t flags : 5; - uint8_t rm : 3; - } fsr; - uint8_t bits; - } fsr; + fcsr_t fcsr; } fp_state_t; void put_fp_state(const void* fp_regs, uint8_t fsr); diff --git a/pk/fp_asm.S b/pk/fp_asm.S index 5e00c6c..0a9f34b 100644 --- a/pk/fp_asm.S +++ b/pk/fp_asm.S @@ -1,5 +1,159 @@ // See LICENSE for license details. 
+#define get_f32(which) fmv.x.s a0, which; ret +#define put_f32(which) fmv.s.x which, a0; ret +#ifdef __riscv64 +# define get_f64(which) fmv.x.d a0, which; ret +# define put_f64(which) fmv.d.x which, a0; ret +#else +# define get_f64(which) fsd which, 0(a0); ret +# define put_f64(which) fld which, 0(a0); ret +#endif + + .text + .globl get_f32_reg + get_f32_reg: + get_f32(f0) + get_f32(f1) + get_f32(f2) + get_f32(f3) + get_f32(f4) + get_f32(f5) + get_f32(f6) + get_f32(f7) + get_f32(f8) + get_f32(f9) + get_f32(f10) + get_f32(f11) + get_f32(f12) + get_f32(f13) + get_f32(f14) + get_f32(f15) + get_f32(f16) + get_f32(f17) + get_f32(f18) + get_f32(f19) + get_f32(f20) + get_f32(f21) + get_f32(f22) + get_f32(f23) + get_f32(f24) + get_f32(f25) + get_f32(f26) + get_f32(f27) + get_f32(f28) + get_f32(f29) + get_f32(f30) + get_f32(f31) + + .text + .globl put_f32_reg + put_f32_reg: + put_f32(f0) + put_f32(f1) + put_f32(f2) + put_f32(f3) + put_f32(f4) + put_f32(f5) + put_f32(f6) + put_f32(f7) + put_f32(f8) + put_f32(f9) + put_f32(f10) + put_f32(f11) + put_f32(f12) + put_f32(f13) + put_f32(f14) + put_f32(f15) + put_f32(f16) + put_f32(f17) + put_f32(f18) + put_f32(f19) + put_f32(f20) + put_f32(f21) + put_f32(f22) + put_f32(f23) + put_f32(f24) + put_f32(f25) + put_f32(f26) + put_f32(f27) + put_f32(f28) + put_f32(f29) + put_f32(f30) + put_f32(f31) + + .text + .globl get_f64_reg + get_f64_reg: + get_f64(f0) + get_f64(f1) + get_f64(f2) + get_f64(f3) + get_f64(f4) + get_f64(f5) + get_f64(f6) + get_f64(f7) + get_f64(f8) + get_f64(f9) + get_f64(f10) + get_f64(f11) + get_f64(f12) + get_f64(f13) + get_f64(f14) + get_f64(f15) + get_f64(f16) + get_f64(f17) + get_f64(f18) + get_f64(f19) + get_f64(f20) + get_f64(f21) + get_f64(f22) + get_f64(f23) + get_f64(f24) + get_f64(f25) + get_f64(f26) + get_f64(f27) + get_f64(f28) + get_f64(f29) + get_f64(f30) + get_f64(f31) + + .text + .globl put_f64_reg + put_f64_reg: + put_f64(f0) + put_f64(f1) + put_f64(f2) + put_f64(f3) + put_f64(f4) + put_f64(f5) + put_f64(f6) + put_f64(f7) + put_f64(f8) + put_f64(f9) + put_f64(f10) + put_f64(f11) + put_f64(f12) + put_f64(f13) + put_f64(f14) + put_f64(f15) + put_f64(f16) + put_f64(f17) + put_f64(f18) + put_f64(f19) + put_f64(f20) + put_f64(f21) + put_f64(f22) + put_f64(f23) + put_f64(f24) + put_f64(f25) + put_f64(f26) + put_f64(f27) + put_f64(f28) + put_f64(f29) + put_f64(f30) + put_f64(f31) + .text .globl get_fp_state get_fp_state: diff --git a/pk/handlers.c b/pk/handlers.c index 2b5bf22..35adefc 100644 --- a/pk/handlers.c +++ b/pk/handlers.c @@ -4,23 +4,6 @@ #include "config.h" #include "syscall.h" #include "vm.h" - -static void handle_accelerator_disabled(trapframe_t* tf) -{ - if (have_accelerator) - tf->sr |= SR_EA; - else - { - dump_tf(tf); - panic("No accelerator hardware!"); - } -} - -static void handle_privileged_instruction(trapframe_t* tf) -{ - dump_tf(tf); - panic("A privileged instruction was executed!"); -} static void handle_illegal_instruction(trapframe_t* tf) { @@ -49,14 +32,6 @@ static void handle_illegal_instruction(trapframe_t* tf) panic("An illegal instruction was executed!"); } -static void handle_fp_disabled(trapframe_t* tf) -{ - if (have_fp && !(read_csr(status) & SR_EF)) - init_fp(tf); - else - handle_illegal_instruction(tf); -} - static void handle_breakpoint(trapframe_t* tf) { dump_tf(tf); @@ -86,7 +61,7 @@ void handle_misaligned_store(trapframe_t* tf) static void segfault(trapframe_t* tf, uintptr_t addr, const char* type) { dump_tf(tf); - const char* who = (tf->sr & SR_PS) ? 
"Kernel" : "User"; + const char* who = (tf->status & MSTATUS_PRV1) ? "Kernel" : "User"; panic("%s %s segfault @ %p", who, type, addr); } @@ -98,14 +73,14 @@ static void handle_fault_fetch(trapframe_t* tf) void handle_fault_load(trapframe_t* tf) { - tf->badvaddr = read_csr(badvaddr); + tf->badvaddr = read_csr(mbadaddr); if (handle_page_fault(tf->badvaddr, PROT_READ) != 0) segfault(tf, tf->badvaddr, "load"); } void handle_fault_store(trapframe_t* tf) { - tf->badvaddr = read_csr(badvaddr); + tf->badvaddr = read_csr(mbadaddr); if (handle_page_fault(tf->badvaddr, PROT_WRITE) != 0) segfault(tf, tf->badvaddr, "store"); } @@ -119,7 +94,7 @@ static void handle_syscall(trapframe_t* tf) void handle_trap(trapframe_t* tf) { - set_csr(status, SR_EI); + set_csr(mstatus, MSTATUS_IE); typedef void (*trap_handler)(trapframe_t*); @@ -127,15 +102,12 @@ void handle_trap(trapframe_t* tf) [CAUSE_MISALIGNED_FETCH] = handle_misaligned_fetch, [CAUSE_FAULT_FETCH] = handle_fault_fetch, [CAUSE_ILLEGAL_INSTRUCTION] = handle_illegal_instruction, - [CAUSE_PRIVILEGED_INSTRUCTION] = handle_privileged_instruction, - [CAUSE_FP_DISABLED] = handle_fp_disabled, [CAUSE_SYSCALL] = handle_syscall, [CAUSE_BREAKPOINT] = handle_breakpoint, [CAUSE_MISALIGNED_LOAD] = handle_misaligned_load, [CAUSE_MISALIGNED_STORE] = handle_misaligned_store, [CAUSE_FAULT_LOAD] = handle_fault_load, [CAUSE_FAULT_STORE] = handle_fault_store, - [CAUSE_ACCELERATOR_DISABLED] = handle_accelerator_disabled, }; kassert(tf->cause < ARRAY_SIZE(trap_handlers) && trap_handlers[tf->cause]); @@ -10,8 +10,6 @@ elf_info current; int have_vm = 1; // unless -p flag is given -int have_fp; -int have_accelerator; int uarch_counters_enabled; long uarch_counters[NUM_COUNTERS]; @@ -22,9 +20,9 @@ void init_tf(trapframe_t* tf, long pc, long sp, int user64) memset(tf,0,sizeof(*tf)); if(sizeof(void*) != 8) kassert(!user64); - tf->sr = (read_csr(status) & (SR_IM | SR_S64 | SR_VM)) | SR_S | SR_PEI; - if(user64) - tf->sr |= SR_U64; + tf->status = read_csr(mstatus); + if (user64) + tf->status |= (long long)UA_RV64 << __builtin_ctzll(MSTATUS_UA); tf->gpr[2] = sp; tf->epc = pc; } @@ -161,5 +159,6 @@ void boot() struct mainvars args0; struct mainvars* args = handle_args(&args0); vm_init(); + fp_init(); user_init(args); } @@ -5,25 +5,11 @@ .section .text,"ax",@progbits .globl _start _start: - - la sp, stack_top - la a0, trap_entry la gp, _gp - csrw evec, a0 - - # clear any pending interrupts - csrwi clear_ipi, 0 + la sp, stack_top + csrw mscratch, sp - li a0, SR_S | SR_PS | SR_EI | SR_S64 | SR_U64 - or a1, a0, SR_EF | SR_EA - csrw status, a1 - csrr a1, status - csrw status, a0 - - and a2, a1, SR_EF - sw a2, have_fp, t0 - - and a2, a1, SR_EA - sw a2, have_accelerator, t0 + li t0, MSTATUS_FS | MSTATUS_XS + csrs mstatus, t0 call boot @@ -12,7 +12,7 @@ typedef struct { long gpr[32]; - long sr; + long status; long epc; long badvaddr; long cause; @@ -33,12 +33,10 @@ void kassert_fail(const char* s) __attribute__((noreturn)); extern "C" { #endif -extern int have_fp; -extern int have_accelerator; extern int have_vm; extern uint32_t mem_mb; int emulate_fp(trapframe_t*); -void init_fp(trapframe_t* tf); +void fp_init(); int emulate_int(trapframe_t*); @@ -16,7 +16,7 @@ SECTIONS .text : { - pk.o(.text) + *(.text.init) } /* text: Program code section */ diff --git a/pk/syscall.c b/pk/syscall.c index 9a03cc9..1b097b7 100644 --- a/pk/syscall.c +++ b/pk/syscall.c @@ -51,7 +51,6 @@ void sys_exit(int code) } frontend_syscall(SYS_exit, code, 0, 0, 0, 0, 0, 0); - clear_csr(status, SR_EI); while (1); } @@ 
-154,7 +154,7 @@ static uintptr_t __vm_alloc(size_t npage) static void flush_tlb() { - write_csr(fatc, 0); + asm volatile("sfence.vm"); } static int __handle_page_fault(uintptr_t vaddr, int prot) @@ -443,11 +443,14 @@ void vm_init() __map_kernel_range(0, current.user_min, PROT_READ|PROT_WRITE|PROT_EXEC); + int vm_field = sizeof(long) == 4 ? VM_SV32 : VM_SV43; if (have_vm) { - write_csr(ptbr, root_page_table_paddr); - set_csr(status, SR_VM); - have_vm = clear_csr(status, SR_VM) & SR_VM; +#if 0 + write_csr(sptbr, root_page_table_paddr); + set_csr(mstatus, vm_field << __builtin_ctz(MSTATUS_VM)); +#endif + have_vm = (clear_csr(mstatus, MSTATUS_VM) & MSTATUS_VM) != VM_MBARE; } size_t stack_size = RISCV_PGSIZE * stack_pages; @@ -458,7 +461,7 @@ void vm_init() { __map_kernel_range(first_free_page, free_pages * RISCV_PGSIZE, PROT_READ|PROT_WRITE); kassert(__do_mmap(stack_bot, stack_size, -1, MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, 0, 0) == stack_bot); - set_csr(status, SR_VM); + set_csr(mstatus, vm_field); } current.stack_bottom = stack_bot; |
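
The save_tf/pop_tf rewrite in pk/entry.S addresses the trap frame with fixed REGBYTES-sized slot numbers that mirror the trapframe_t updated in pk/pk.h above. As a recap of that correspondence (a sketch only: the position of the insn field and the comments are inferred from the rest of this diff rather than spelled out in any single hunk):

typedef struct
{
  long gpr[32];   /* slots 0..31: x1 stored directly; sp (x2) is recovered
                     from the mscratch swap performed in trap_entry        */
  long status;    /* slot 32: mstatus image; pop_tf writes it back (csrw)  */
  long epc;       /* slot 33: mepc, the pc that mret returns to            */
  long badvaddr;  /* slot 34: not written by save_tf; the C fault handlers
                     fill it from mbadaddr                                 */
  long cause;     /* slot 35: mcause, used to index trap_handlers[]        */
  long insn;      /* faulting instruction word printed by dump_tf          */
} trapframe_t;

The 320 bytes reserved in trap_entry (addi sp,sp,-320) appear to be sizeof(trapframe_t) rounded up for stack alignment.
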