Diffstat (limited to 'target/riscv/helper.c')
-rw-r--r--  target/riscv/helper.c  98
1 file changed, 57 insertions(+), 41 deletions(-)
diff --git a/target/riscv/helper.c b/target/riscv/helper.c
index 29e1a60..63b3386 100644
--- a/target/riscv/helper.c
+++ b/target/riscv/helper.c
@@ -35,28 +35,18 @@ int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
}
#ifndef CONFIG_USER_ONLY
-/*
- * Return RISC-V IRQ number if an interrupt should be taken, else -1.
- * Used in cpu-exec.c
- *
- * Adapted from Spike's processor_t::take_interrupt()
- */
-static int riscv_cpu_hw_interrupts_pending(CPURISCVState *env)
+static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
- target_ulong pending_interrupts = atomic_read(&env->mip) & env->mie;
-
- target_ulong mie = get_field(env->mstatus, MSTATUS_MIE);
- target_ulong m_enabled = env->priv < PRV_M || (env->priv == PRV_M && mie);
- target_ulong enabled_interrupts = pending_interrupts &
- ~env->mideleg & -m_enabled;
-
- target_ulong sie = get_field(env->mstatus, MSTATUS_SIE);
- target_ulong s_enabled = env->priv < PRV_S || (env->priv == PRV_S && sie);
- enabled_interrupts |= pending_interrupts & env->mideleg &
- -s_enabled;
-
- if (enabled_interrupts) {
- return ctz64(enabled_interrupts); /* since non-zero */
+ target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
+ target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
+ target_ulong pending = atomic_read(&env->mip) & env->mie;
+ target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie);
+ target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie);
+ target_ulong irqs = (pending & ~env->mideleg & -mie) |
+ (pending & env->mideleg & -sie);
+
+ if (irqs) {
+ return ctz64(irqs); /* since non-zero */
} else {
return EXCP_NONE; /* indicates no pending interrupt */
}
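
The rewritten riscv_cpu_local_irq_pending() above relies on a branchless trick: negating a 0/1 enable flag yields an all-ones or all-zeroes mask, so delegated and non-delegated interrupts can be selected without conditionals. A standalone sketch of the same computation (not QEMU code; the mip/mideleg bit positions follow the privileged spec, and __builtin_ctzll stands in for QEMU's ctz64()):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical values: SSIP, STIP, MTIP and SEIP pending,
         * with the supervisor interrupts delegated via mideleg */
        uint64_t pending = 0x2a2;   /* mip & mie */
        uint64_t mideleg = 0x222;
        uint64_t mie = 1, sie = 0;  /* global enables reduced to 0/1 flags */

        /* -mie is all ones when mie == 1 and zero when mie == 0 */
        uint64_t irqs = (pending & ~mideleg & -mie) |
                        (pending & mideleg & -sie);

        printf("irqs = 0x%" PRIx64 "\n", irqs);        /* 0x80: MTIP only */
        printf("irq  = %d\n", __builtin_ctzll(irqs));  /* 7, the MTIP IRQ number */
        return 0;
    }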
@@ -69,7 +59,7 @@ bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
if (interrupt_request & CPU_INTERRUPT_HARD) {
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
- int interruptno = riscv_cpu_hw_interrupts_pending(env);
+ int interruptno = riscv_cpu_local_irq_pending(env);
if (interruptno >= 0) {
cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
riscv_cpu_do_interrupt(cs);
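
For reference, the RISCV_EXCP_INT_FLAG OR-ed into cs->exception_index here is what later lets riscv_cpu_do_interrupt() tell interrupts apart from synchronous exceptions. A minimal sketch of that decode; the flag and mask values below are assumptions for illustration, not copied from QEMU's headers:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch only: values assumed, the in-tree definitions live in cpu_bits.h */
    #define RISCV_EXCP_INT_FLAG 0x80000000U
    #define RISCV_EXCP_INT_MASK 0x7fffffffU

    static inline bool exception_is_interrupt(uint32_t exception_index)
    {
        return (exception_index & RISCV_EXCP_INT_FLAG) != 0;
    }

    static inline uint32_t exception_cause(uint32_t exception_index)
    {
        return exception_index & RISCV_EXCP_INT_MASK;
    }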
@@ -185,16 +175,39 @@ restart:
#endif
target_ulong ppn = pte >> PTE_PPN_SHIFT;
- if (PTE_TABLE(pte)) { /* next level of page table */
+ if (!(pte & PTE_V)) {
+ /* Invalid PTE */
+ return TRANSLATE_FAIL;
+ } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
+ /* Inner PTE, continue walking */
base = ppn << PGSHIFT;
- } else if ((pte & PTE_U) ? (mode == PRV_S) && !sum : !(mode == PRV_S)) {
- break;
- } else if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W))) {
- break;
- } else if (access_type == MMU_INST_FETCH ? !(pte & PTE_X) :
- access_type == MMU_DATA_LOAD ? !(pte & PTE_R) &&
- !(mxr && (pte & PTE_X)) : !((pte & PTE_R) && (pte & PTE_W))) {
- break;
+ } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
+ /* Reserved leaf PTE flags: PTE_W */
+ return TRANSLATE_FAIL;
+ } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
+ /* Reserved leaf PTE flags: PTE_W + PTE_X */
+ return TRANSLATE_FAIL;
+ } else if ((pte & PTE_U) && ((mode != PRV_U) &&
+ (!sum || access_type == MMU_INST_FETCH))) {
+ /* User PTE flags when not U mode and mstatus.SUM is not set,
+ or the access type is an instruction fetch */
+ return TRANSLATE_FAIL;
+ } else if (!(pte & PTE_U) && (mode != PRV_S)) {
+ /* Supervisor PTE flags when not S mode */
+ return TRANSLATE_FAIL;
+ } else if (ppn & ((1ULL << ptshift) - 1)) {
+ /* Misaligned PPN */
+ return TRANSLATE_FAIL;
+ } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
+ ((pte & PTE_X) && mxr))) {
+ /* Read access check failed */
+ return TRANSLATE_FAIL;
+ } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
+ /* Write access check failed */
+ return TRANSLATE_FAIL;
+ } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
+ /* Fetch access check failed */
+ return TRANSLATE_FAIL;
} else {
/* if necessary, set accessed and dirty bits. */
target_ulong updated_pte = pte | PTE_A |
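
The else-if chain above follows the privileged spec's encoding of the PTE R/W/X bits: all clear means a pointer to the next level, W-only and W+X are reserved, and everything else is a leaf that must still pass the U/SUM, alignment and access-type checks. A compact sketch of that classification (bit values per the spec, not copied from QEMU headers):

    /* Sketch only: V/R/W/X bit positions follow the RISC-V privileged spec. */
    enum { PTE_V = 0x1, PTE_R = 0x2, PTE_W = 0x4, PTE_X = 0x8 };

    enum pte_kind { PTE_INVALID, PTE_POINTER, PTE_RESERVED, PTE_LEAF };

    static enum pte_kind classify_pte(unsigned long pte)
    {
        unsigned long rwx = pte & (PTE_R | PTE_W | PTE_X);

        if (!(pte & PTE_V)) {
            return PTE_INVALID;     /* not present */
        } else if (rwx == 0) {
            return PTE_POINTER;     /* non-leaf: continue the walk */
        } else if (rwx == PTE_W || rwx == (PTE_W | PTE_X)) {
            return PTE_RESERVED;    /* reserved encodings: TRANSLATE_FAIL */
        } else {
            return PTE_LEAF;        /* R, RW, X, RX or RWX leaf */
        }
    }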
@@ -202,16 +215,19 @@ restart:
/* Page table updates need to be atomic with MTTCG enabled */
if (updated_pte != pte) {
- /* if accessed or dirty bits need updating, and the PTE is
- * in RAM, then we do so atomically with a compare and swap.
- * if the PTE is in IO space, then it can't be updated.
- * if the PTE changed, then we must re-walk the page table
- as the PTE is no longer valid */
+ /*
+ * - if accessed or dirty bits need updating, and the PTE is
+ * in RAM, then we do so atomically with a compare and swap.
+ * - if the PTE is in IO space or ROM, then it can't be updated
+ * and we return TRANSLATE_FAIL.
+ * - if the PTE changed by the time we went to update it, then
+ * it is no longer valid and we must re-walk the page table.
+ */
MemoryRegion *mr;
hwaddr l = sizeof(target_ulong), addr1;
mr = address_space_translate(cs->as, pte_addr,
&addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
- if (memory_access_is_direct(mr, true)) {
+ if (memory_region_is_ram(mr)) {
target_ulong *pte_pa =
qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
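
The update path that follows (cut off at the hunk boundary) writes the accessed/dirty bits with a compare-and-swap and jumps back to the restart label when the PTE has changed underneath the walker. A standalone sketch of that pattern using C11 atomics, assuming a hypothetical pte_pa pointer into guest RAM:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Sketch only: returns false when the in-memory PTE no longer matches
     * the value we walked, in which case the caller must re-walk the page
     * table rather than trust a stale translation. */
    static bool set_accessed_dirty(_Atomic unsigned long *pte_pa,
                                   unsigned long old_pte,
                                   unsigned long updated_pte)
    {
        unsigned long expected = old_pte;

        return atomic_compare_exchange_strong(pte_pa, &expected, updated_pte);
    }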
@@ -239,15 +255,15 @@ restart:
target_ulong vpn = addr >> PGSHIFT;
*physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;
- if ((pte & PTE_R)) {
+ /* set permissions on the TLB entry */
+ if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
*prot |= PAGE_READ;
}
if ((pte & PTE_X)) {
*prot |= PAGE_EXEC;
}
- /* only add write permission on stores or if the page
- is already dirty, so that we don't miss further
- page table walks to update the dirty bit */
+ /* add write permission on stores or if the page is already dirty,
+ so that we TLB miss on later writes to update the dirty bit */
if ((pte & PTE_W) &&
(access_type == MMU_DATA_STORE || (pte & PTE_D))) {
*prot |= PAGE_WRITE;
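
Taken together, the permission bits set at the end reduce to a small pure function: MXR makes executable pages readable, and write permission is withheld until the page is dirty so that the first store to a clean mapping faults back into the walker and sets PTE_D. A sketch with illustrative flag values (QEMU's own PTE_*/PAGE_* definitions apply in-tree):

    #include <stdbool.h>

    /* Sketch only: bit values are illustrative, not copied from QEMU headers. */
    #define PTE_R       0x2
    #define PTE_W       0x4
    #define PTE_X       0x8
    #define PTE_D       0x80
    #define PAGE_READ   0x1
    #define PAGE_WRITE  0x2
    #define PAGE_EXEC   0x4

    static int pte_prot(unsigned long pte, bool mxr, bool is_store)
    {
        int prot = 0;

        if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
            prot |= PAGE_READ;      /* MXR: loads may read execute-only pages */
        }
        if (pte & PTE_X) {
            prot |= PAGE_EXEC;
        }
        if ((pte & PTE_W) && (is_store || (pte & PTE_D))) {
            prot |= PAGE_WRITE;     /* clean pages stay read-only until dirtied */
        }
        return prot;
    }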