path: root/target/riscv/cpu_helper.c
Diffstat (limited to 'target/riscv/cpu_helper.c')
-rw-r--r--  target/riscv/cpu_helper.c  1006
1 file changed, 823 insertions, 183 deletions
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 6709622..2ed69d7 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -23,16 +23,19 @@
#include "cpu.h"
#include "internals.h"
#include "pmu.h"
-#include "exec/exec-all.h"
+#include "exec/cputlb.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
+#include "system/memory.h"
#include "instmap.h"
#include "tcg/tcg-op.h"
+#include "accel/tcg/cpu-ops.h"
#include "trace.h"
#include "semihosting/common-semi.h"
-#include "sysemu/cpu-timers.h"
+#include "exec/icount.h"
#include "cpu_bits.h"
#include "debug.h"
-#include "tcg/oversized-guest.h"
+#include "pmp.h"
int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
{
@@ -63,134 +66,169 @@ int riscv_env_mmu_index(CPURISCVState *env, bool ifetch)
#endif
}
-void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
+bool cpu_get_fcfien(CPURISCVState *env)
{
- RISCVCPU *cpu = env_archcpu(env);
- RISCVExtStatus fs, vs;
- uint32_t flags = 0;
+ /* no cfi extension, return false */
+ if (!env_archcpu(env)->cfg.ext_zicfilp) {
+ return false;
+ }
- *pc = env->xl == MXL_RV32 ? env->pc & UINT32_MAX : env->pc;
- *cs_base = 0;
+ switch (env->priv) {
+ case PRV_U:
+ if (riscv_has_ext(env, RVS)) {
+ return env->senvcfg & SENVCFG_LPE;
+ }
+ return env->menvcfg & MENVCFG_LPE;
+#ifndef CONFIG_USER_ONLY
+ case PRV_S:
+ if (env->virt_enabled) {
+ return env->henvcfg & HENVCFG_LPE;
+ }
+ return env->menvcfg & MENVCFG_LPE;
+ case PRV_M:
+ return env->mseccfg & MSECCFG_MLPE;
+#endif
+ default:
+ g_assert_not_reached();
+ }
+}
- if (cpu->cfg.ext_zve32x) {
+bool cpu_get_bcfien(CPURISCVState *env)
+{
+ /* no cfi extension, return false */
+ if (!env_archcpu(env)->cfg.ext_zicfiss) {
+ return false;
+ }
+
+ switch (env->priv) {
+ case PRV_U:
/*
- * If env->vl equals to VLMAX, we can use generic vector operation
- * expanders (GVEC) to accerlate the vector operations.
- * However, as LMUL could be a fractional number. The maximum
- * vector size can be operated might be less than 8 bytes,
- * which is not supported by GVEC. So we set vl_eq_vlmax flag to true
- * only when maxsz >= 8 bytes.
+ * If S is not implemented, then the shadow stack for U can't be turned on.
+ * This is checked in `riscv_cpu_validate_set_extensions`, so there is no
+ * need to check or assert here.
*/
-
- /* lmul encoded as in DisasContext::lmul */
- int8_t lmul = sextract32(FIELD_EX64(env->vtype, VTYPE, VLMUL), 0, 3);
- uint32_t vsew = FIELD_EX64(env->vtype, VTYPE, VSEW);
- uint32_t vlmax = vext_get_vlmax(cpu->cfg.vlenb, vsew, lmul);
- uint32_t maxsz = vlmax << vsew;
- bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
- (maxsz >= 8);
- flags = FIELD_DP32(flags, TB_FLAGS, VILL, env->vill);
- flags = FIELD_DP32(flags, TB_FLAGS, SEW, vsew);
- flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
- FIELD_EX64(env->vtype, VTYPE, VLMUL));
- flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
- flags = FIELD_DP32(flags, TB_FLAGS, VTA,
- FIELD_EX64(env->vtype, VTYPE, VTA));
- flags = FIELD_DP32(flags, TB_FLAGS, VMA,
- FIELD_EX64(env->vtype, VTYPE, VMA));
- flags = FIELD_DP32(flags, TB_FLAGS, VSTART_EQ_ZERO, env->vstart == 0);
- } else {
- flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
+ return env->senvcfg & SENVCFG_SSE;
+#ifndef CONFIG_USER_ONLY
+ case PRV_S:
+ if (env->virt_enabled) {
+ return env->henvcfg & HENVCFG_SSE;
+ }
+ return env->menvcfg & MENVCFG_SSE;
+ case PRV_M: /* M-mode shadow stack is always off */
+ return false;
+#endif
+ default:
+ g_assert_not_reached();
}
+}
+bool riscv_env_smode_dbltrp_enabled(CPURISCVState *env, bool virt)
+{
#ifdef CONFIG_USER_ONLY
- fs = EXT_STATUS_DIRTY;
- vs = EXT_STATUS_DIRTY;
+ return false;
#else
- flags = FIELD_DP32(flags, TB_FLAGS, PRIV, env->priv);
-
- flags |= riscv_env_mmu_index(env, 0);
- fs = get_field(env->mstatus, MSTATUS_FS);
- vs = get_field(env->mstatus, MSTATUS_VS);
-
- if (env->virt_enabled) {
- flags = FIELD_DP32(flags, TB_FLAGS, VIRT_ENABLED, 1);
- /*
- * Merge DISABLED and !DIRTY states using MIN.
- * We will set both fields when dirtying.
- */
- fs = MIN(fs, get_field(env->mstatus_hs, MSTATUS_FS));
- vs = MIN(vs, get_field(env->mstatus_hs, MSTATUS_VS));
+ if (virt) {
+ return (env->henvcfg & HENVCFG_DTE) != 0;
+ } else {
+ return (env->menvcfg & MENVCFG_DTE) != 0;
}
+#endif
+}
- /* With Zfinx, floating point is enabled/disabled by Smstateen. */
- if (!riscv_has_ext(env, RVF)) {
- fs = (smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR) == RISCV_EXCP_NONE)
- ? EXT_STATUS_DIRTY : EXT_STATUS_DISABLED;
- }
+RISCVPmPmm riscv_pm_get_pmm(CPURISCVState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ int priv_mode = cpu_address_mode(env);
- if (cpu->cfg.debug && !icount_enabled()) {
- flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled);
+ if (get_field(env->mstatus, MSTATUS_MPRV) &&
+ get_field(env->mstatus, MSTATUS_MXR)) {
+ return PMM_FIELD_DISABLED;
}
-#endif
- flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
- flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
- flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
- flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
- if (env->cur_pmmask != 0) {
- flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
- }
- if (env->cur_pmbase != 0) {
- flags = FIELD_DP32(flags, TB_FLAGS, PM_BASE_ENABLED, 1);
+ /* Get current PMM field */
+ switch (priv_mode) {
+ case PRV_M:
+ if (riscv_cpu_cfg(env)->ext_smmpm) {
+ return get_field(env->mseccfg, MSECCFG_PMM);
+ }
+ break;
+ case PRV_S:
+ if (riscv_cpu_cfg(env)->ext_smnpm) {
+ if (get_field(env->mstatus, MSTATUS_MPV)) {
+ return get_field(env->henvcfg, HENVCFG_PMM);
+ } else {
+ return get_field(env->menvcfg, MENVCFG_PMM);
+ }
+ }
+ break;
+ case PRV_U:
+ if (riscv_has_ext(env, RVS)) {
+ if (riscv_cpu_cfg(env)->ext_ssnpm) {
+ return get_field(env->senvcfg, SENVCFG_PMM);
+ }
+ } else {
+ if (riscv_cpu_cfg(env)->ext_smnpm) {
+ return get_field(env->menvcfg, MENVCFG_PMM);
+ }
+ }
+ break;
+ default:
+ g_assert_not_reached();
}
-
- *pflags = flags;
+ return PMM_FIELD_DISABLED;
+#else
+ return PMM_FIELD_DISABLED;
+#endif
}
-void riscv_cpu_update_mask(CPURISCVState *env)
+RISCVPmPmm riscv_pm_get_virt_pmm(CPURISCVState *env)
{
- target_ulong mask = 0, base = 0;
- RISCVMXL xl = env->xl;
- /*
- * TODO: Current RVJ spec does not specify
- * how the extension interacts with XLEN.
- */
#ifndef CONFIG_USER_ONLY
- int mode = cpu_address_mode(env);
- xl = cpu_get_xl(env, mode);
- if (riscv_has_ext(env, RVJ)) {
- switch (mode) {
- case PRV_M:
- if (env->mmte & M_PM_ENABLE) {
- mask = env->mpmmask;
- base = env->mpmbase;
- }
- break;
- case PRV_S:
- if (env->mmte & S_PM_ENABLE) {
- mask = env->spmmask;
- base = env->spmbase;
- }
- break;
- case PRV_U:
- if (env->mmte & U_PM_ENABLE) {
- mask = env->upmmask;
- base = env->upmbase;
- }
- break;
- default:
- g_assert_not_reached();
+ int priv_mode = cpu_address_mode(env);
+
+ if (priv_mode == PRV_U) {
+ return get_field(env->hstatus, HSTATUS_HUPMM);
+ } else {
+ if (get_field(env->hstatus, HSTATUS_SPVP)) {
+ return get_field(env->henvcfg, HENVCFG_PMM);
+ } else {
+ return get_field(env->senvcfg, SENVCFG_PMM);
}
}
+#else
+ return PMM_FIELD_DISABLED;
#endif
- if (xl == MXL_RV32) {
- env->cur_pmmask = mask & UINT32_MAX;
- env->cur_pmbase = base & UINT32_MAX;
+}
+
+bool riscv_cpu_virt_mem_enabled(CPURISCVState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ int satp_mode = 0;
+ int priv_mode = cpu_address_mode(env);
+
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ satp_mode = get_field(env->satp, SATP32_MODE);
} else {
- env->cur_pmmask = mask;
- env->cur_pmbase = base;
+ satp_mode = get_field(env->satp, SATP64_MODE);
+ }
+
+ return ((satp_mode != VM_1_10_MBARE) && (priv_mode != PRV_M));
+#else
+ return false;
+#endif
+}
+
+uint32_t riscv_pm_get_pmlen(RISCVPmPmm pmm)
+{
+ switch (pmm) {
+ case PMM_FIELD_DISABLED:
+ return 0;
+ case PMM_FIELD_PMLEN7:
+ return 7;
+ case PMM_FIELD_PMLEN16:
+ return 16;
+ default:
+ g_assert_not_reached();
}
}
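
riscv_pm_get_pmlen() maps the encoded PMM field to the number of most-significant address bits that pointer masking ignores. As a rough sketch of how such a PMLEN value could be applied to an address (apply_pmlen is a hypothetical helper, not part of this patch, and the sign-extension of the remaining bits is an assumption about the common tagged-pointer case):

    static uint64_t apply_pmlen(uint64_t addr, uint32_t pmlen)
    {
        if (pmlen == 0) {
            return addr;                /* pointer masking disabled */
        }
        /* drop the top 'pmlen' tag bits, then sign-extend from the new MSB */
        return (uint64_t)((int64_t)(addr << pmlen) >> pmlen);
    }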
@@ -434,6 +472,18 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
uint64_t vsbits, irq_delegated;
int virq;
+ /* Priority: RNMI > Other interrupt. */
+ if (riscv_cpu_cfg(env)->ext_smrnmi) {
+ /* If mnstatus.NMIE == 0, all interrupts are disabled. */
+ if (!get_field(env->mnstatus, MNSTATUS_NMIE)) {
+ return RISCV_EXCP_NONE;
+ }
+
+ if (env->rnmip) {
+ return ctz64(env->rnmip); /* since non-zero */
+ }
+ }
+
/* Determine interrupt enable state of all privilege modes */
if (env->virt_enabled) {
mie = 1;
@@ -496,7 +546,9 @@ static int riscv_cpu_local_irq_pending(CPURISCVState *env)
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
- if (interrupt_request & CPU_INTERRUPT_HARD) {
+ uint32_t mask = CPU_INTERRUPT_HARD | CPU_INTERRUPT_RNMI;
+
+ if (interrupt_request & mask) {
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
int interruptno = riscv_cpu_local_irq_pending(env);
@@ -546,8 +598,21 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
}
bool current_virt = env->virt_enabled;
+ /*
+ * If the zicfilp extension is available and henvcfg.LPE = 1,
+ * then apply the SPELP mask on mstatus.
+ */
+ if (env_archcpu(env)->cfg.ext_zicfilp &&
+ get_field(env->henvcfg, HENVCFG_LPE)) {
+ mstatus_mask |= SSTATUS_SPELP;
+ }
+
g_assert(riscv_has_ext(env, RVH));
+ if (riscv_env_smode_dbltrp_enabled(env, current_virt)) {
+ mstatus_mask |= MSTATUS_SDT;
+ }
+
if (current_virt) {
/* Current V=1 and we are about to change to V=0 */
env->vsstatus = env->mstatus & mstatus_mask;
@@ -619,27 +684,27 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
env->geilen = geilen;
}
-/* This function can only be called to set virt when RVH is enabled */
-void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
+void riscv_cpu_set_rnmi(RISCVCPU *cpu, uint32_t irq, bool level)
{
- /* Flush the TLB on all virt mode changes. */
- if (env->virt_enabled != enable) {
- tlb_flush(env_cpu(env));
+ CPURISCVState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ bool release_lock = false;
+
+ if (!bql_locked()) {
+ release_lock = true;
+ bql_lock();
}
- env->virt_enabled = enable;
+ if (level) {
+ env->rnmip |= 1 << irq;
+ cpu_interrupt(cs, CPU_INTERRUPT_RNMI);
+ } else {
+ env->rnmip &= ~(1 << irq);
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_RNMI);
+ }
- if (enable) {
- /*
- * The guest external interrupts from an interrupt controller are
- * delivered only when the Guest/VM is running (i.e. V=1). This means
- * any guest external interrupt which is triggered while the Guest/VM
- * is not running (i.e. V=0) will be missed on QEMU resulting in guest
- * with sluggish response to serial console input and other I/O events.
- *
- * To solve this, we check and inject interrupt after setting V=1.
- */
- riscv_cpu_update_mip(env, 0, 0);
+ if (release_lock) {
+ bql_unlock();
}
}
@@ -715,17 +780,269 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
}
}
-void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
+static void riscv_ctr_freeze(CPURISCVState *env, uint64_t freeze_mask,
+ bool virt)
+{
+ uint64_t ctl = virt ? env->vsctrctl : env->mctrctl;
+
+ assert((freeze_mask & (~(XCTRCTL_BPFRZ | XCTRCTL_LCOFIFRZ))) == 0);
+
+ if (ctl & freeze_mask) {
+ env->sctrstatus |= SCTRSTATUS_FROZEN;
+ }
+}
+
+void riscv_ctr_clear(CPURISCVState *env)
+{
+ memset(env->ctr_src, 0x0, sizeof(env->ctr_src));
+ memset(env->ctr_dst, 0x0, sizeof(env->ctr_dst));
+ memset(env->ctr_data, 0x0, sizeof(env->ctr_data));
+}
+
+static uint64_t riscv_ctr_priv_to_mask(target_ulong priv, bool virt)
+{
+ switch (priv) {
+ case PRV_M:
+ return MCTRCTL_M;
+ case PRV_S:
+ if (virt) {
+ return XCTRCTL_S;
+ }
+ return XCTRCTL_S;
+ case PRV_U:
+ if (virt) {
+ return XCTRCTL_U;
+ }
+ return XCTRCTL_U;
+ }
+
+ g_assert_not_reached();
+}
+
+static uint64_t riscv_ctr_get_control(CPURISCVState *env, target_long priv,
+ bool virt)
+{
+ switch (priv) {
+ case PRV_M:
+ return env->mctrctl;
+ case PRV_S:
+ case PRV_U:
+ if (virt) {
+ return env->vsctrctl;
+ }
+ return env->mctrctl;
+ }
+
+ g_assert_not_reached();
+}
+
+/*
+ * This function assumes that src privilege and target privilege are not the
+ * same and that src privilege is less than target privilege. This includes
+ * the virtual state as well.
+ */
+static bool riscv_ctr_check_xte(CPURISCVState *env, target_long src_prv,
+ bool src_virt)
+{
+ target_long tgt_prv = env->priv;
+ bool res = true;
+
+ /*
+ * VS and U mode are the same in terms of the xTE bits required to record an
+ * external trap. See 6.1.2. External Traps, table 8 External Trap Enable
+ * Requirements. This changes VS to U to simplify the logic a bit.
+ */
+ if (src_virt && src_prv == PRV_S) {
+ src_prv = PRV_U;
+ } else if (env->virt_enabled && tgt_prv == PRV_S) {
+ tgt_prv = PRV_U;
+ }
+
+ /* VU mode is an outlier here. */
+ if (src_virt && src_prv == PRV_U) {
+ res &= !!(env->vsctrctl & XCTRCTL_STE);
+ }
+
+ switch (src_prv) {
+ case PRV_U:
+ if (tgt_prv == PRV_U) {
+ break;
+ }
+ res &= !!(env->mctrctl & XCTRCTL_STE);
+ /* fall-through */
+ case PRV_S:
+ if (tgt_prv == PRV_S) {
+ break;
+ }
+ res &= !!(env->mctrctl & MCTRCTL_MTE);
+ /* fall-through */
+ case PRV_M:
+ break;
+ }
+
+ return res;
+}
+
+/*
+ * Special cases for traps and trap returns:
+ *
+ * 1- Traps, and trap returns, between enabled modes are recorded as normal.
+ * 2- Traps from an inhibited mode to an enabled mode, and trap returns from an
+ * enabled mode back to an inhibited mode, are partially recorded. In such
+ * cases, the PC from the inhibited mode (source PC for traps, and target PC
+ * for trap returns) is 0.
+ *
+ * 3- Trap returns from an inhibited mode to an enabled mode are not recorded.
+ * Traps from an enabled mode to an inhibited mode, known as external traps,
+ * receive special handling.
+ * By default external traps are not recorded, but a handshake mechanism exists
+ * to allow partial recording. Software running in the target mode of the trap
+ * can opt-in to allowing CTR to record traps into that mode even when the mode
+ * is inhibited. The MTE, STE, and VSTE bits allow M-mode, S-mode, and VS-mode,
+ * respectively, to opt-in. When an External Trap occurs, and xTE=1, such that
+ * x is the target privilege mode of the trap, will CTR record the trap. In such
+ * cases, the target PC is 0.
+ */
+/*
+ * CTR arrays are implemented as circular buffers and new entry is stored at
+ * sctrstatus.WRPTR, but they are presented to software as moving circular
+ * buffers. Which means, software get's the illusion that whenever a new entry
+ * is added the whole buffer is moved by one place and the new entry is added at
+ * the start keeping new entry at idx 0 and older ones follow.
+ *
+ * Depth = 16.
+ *
+ * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
+ * WRPTR W
+ * entry 7 6 5 4 3 2 1 0 F E D C B A 9 8
+ *
+ * When a new entry is added:
+ * buffer [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [A] [B] [C] [D] [E] [F]
+ * WRPTR W
+ * entry 8 7 6 5 4 3 2 1 0 F E D C B A 9
+ *
+ * entry here denotes the logical entry number that software can access
+ * using ctrsource, ctrtarget and ctrdata registers. So xiselect 0x200
+ * will return entry 0, i.e. buffer[8], and 0x201 will return entry 1, i.e.
+ * buffer[7]. Here is how we convert entry to buffer idx.
+ *
+ * entry = isel - CTR_ENTRIES_FIRST;
+ * idx = (sctrstatus.WRPTR - entry - 1) & (depth - 1);
+ */
+void riscv_ctr_add_entry(CPURISCVState *env, target_long src, target_long dst,
+ enum CTRType type, target_ulong src_priv, bool src_virt)
+{
+ bool tgt_virt = env->virt_enabled;
+ uint64_t src_mask = riscv_ctr_priv_to_mask(src_priv, src_virt);
+ uint64_t tgt_mask = riscv_ctr_priv_to_mask(env->priv, tgt_virt);
+ uint64_t src_ctrl = riscv_ctr_get_control(env, src_priv, src_virt);
+ uint64_t tgt_ctrl = riscv_ctr_get_control(env, env->priv, tgt_virt);
+ uint64_t depth, head;
+ bool ext_trap = false;
+
+ /*
+ * Return immediately if both target and src recording is disabled or if
+ * CTR is in frozen state.
+ */
+ if ((!(src_ctrl & src_mask) && !(tgt_ctrl & tgt_mask)) ||
+ env->sctrstatus & SCTRSTATUS_FROZEN) {
+ return;
+ }
+
+ /*
+ * With RAS Emul enabled, only allow Indirect, direct calls, Function
+ * returns and Co-routine swap types.
+ */
+ if (tgt_ctrl & XCTRCTL_RASEMU &&
+ type != CTRDATA_TYPE_INDIRECT_CALL &&
+ type != CTRDATA_TYPE_DIRECT_CALL &&
+ type != CTRDATA_TYPE_RETURN &&
+ type != CTRDATA_TYPE_CO_ROUTINE_SWAP) {
+ return;
+ }
+
+ if (type == CTRDATA_TYPE_EXCEPTION || type == CTRDATA_TYPE_INTERRUPT) {
+ /* Case 2 for traps. */
+ if (!(src_ctrl & src_mask)) {
+ src = 0;
+ } else if (!(tgt_ctrl & tgt_mask)) {
+ /* Check if target priv-mode has allowed external trap recording. */
+ if (!riscv_ctr_check_xte(env, src_priv, src_virt)) {
+ return;
+ }
+
+ ext_trap = true;
+ dst = 0;
+ }
+ } else if (type == CTRDATA_TYPE_EXCEP_INT_RET) {
+ /*
+ * Case 3 for trap returns. Trap returns from inhibited mode are not
+ * recorded.
+ */
+ if (!(src_ctrl & src_mask)) {
+ return;
+ }
+
+ /* Case 2 for trap returns. */
+ if (!(tgt_ctrl & tgt_mask)) {
+ dst = 0;
+ }
+ }
+
+ /* Ignore filters in case of RASEMU mode or External trap. */
+ if (!(tgt_ctrl & XCTRCTL_RASEMU) && !ext_trap) {
+ /*
+ * Check if the specific type is inhibited. Not taken branch filter is
+ * an enable bit and needs to be checked separately.
+ */
+ bool check = tgt_ctrl & BIT_ULL(type + XCTRCTL_INH_START);
+ if ((type == CTRDATA_TYPE_NONTAKEN_BRANCH && !check) ||
+ (type != CTRDATA_TYPE_NONTAKEN_BRANCH && check)) {
+ return;
+ }
+ }
+
+ head = get_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK);
+
+ depth = 16 << get_field(env->sctrdepth, SCTRDEPTH_MASK);
+ if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_RETURN) {
+ head = (head - 1) & (depth - 1);
+
+ env->ctr_src[head] &= ~CTRSOURCE_VALID;
+ env->sctrstatus =
+ set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
+ return;
+ }
+
+ /* In case of Co-routine SWAP we overwrite latest entry. */
+ if (tgt_ctrl & XCTRCTL_RASEMU && type == CTRDATA_TYPE_CO_ROUTINE_SWAP) {
+ head = (head - 1) & (depth - 1);
+ }
+
+ env->ctr_src[head] = src | CTRSOURCE_VALID;
+ env->ctr_dst[head] = dst & ~CTRTARGET_MISP;
+ env->ctr_data[head] = set_field(0, CTRDATA_TYPE_MASK, type);
+
+ head = (head + 1) & (depth - 1);
+
+ env->sctrstatus = set_field(env->sctrstatus, SCTRSTATUS_WRPTR_MASK, head);
+}
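
The entry-to-index conversion documented above riscv_ctr_add_entry() amounts to a one-line helper; a minimal sketch (ctr_entry_to_buf_idx is a hypothetical name, not part of this patch):

    static unsigned ctr_entry_to_buf_idx(unsigned wrptr, unsigned entry,
                                         unsigned depth)
    {
        /* depth is a power of two (16 << sctrdepth), so the mask wraps around */
        return (wrptr - entry - 1) & (depth - 1);
    }

With depth = 16 and WRPTR = 9, as in the second table above, entry 0 maps to buffer[8] and entry 1 maps to buffer[7].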
+
+void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
{
g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
- if (icount_enabled() && newpriv != env->priv) {
- riscv_itrigger_update_priv(env);
+ if (newpriv != env->priv || env->virt_enabled != virt_en) {
+ if (icount_enabled()) {
+ riscv_itrigger_update_priv(env);
+ }
+
+ riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en);
}
+
/* tlb_flush is unnecessary as mode is contained in mmu_idx */
env->priv = newpriv;
env->xl = cpu_recompute_xl(env);
- riscv_cpu_update_mask(env);
/*
* Clear the load reservation - otherwise a reservation placed in one
@@ -736,6 +1053,28 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
* preemptive context switch. As a result, do both.
*/
env->load_res = -1;
+
+ if (riscv_has_ext(env, RVH)) {
+ /* Flush the TLB on all virt mode changes. */
+ if (env->virt_enabled != virt_en) {
+ tlb_flush(env_cpu(env));
+ }
+
+ env->virt_enabled = virt_en;
+ if (virt_en) {
+ /*
+ * The guest external interrupts from an interrupt controller are
+ * delivered only when the Guest/VM is running (i.e. V=1). This
+ * means any guest external interrupt which is triggered while the
+ * Guest/VM is not running (i.e. V=0) will be missed on QEMU
+ * resulting in guest with sluggish response to serial console
+ * input and other I/O events.
+ *
+ * To solve this, we check and inject interrupt after setting V=1.
+ */
+ riscv_cpu_update_mip(env, 0, 0);
+ }
+ }
}
/*
@@ -774,6 +1113,55 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
return TRANSLATE_SUCCESS;
}
+/* Returns 'true' if a svukte address check is needed */
+static bool do_svukte_check(CPURISCVState *env, bool first_stage,
+ int mode, bool virt)
+{
+ /* Svukte extension depends on Sv39. */
+ if (!env_archcpu(env)->cfg.ext_svukte ||
+ !first_stage ||
+ VM_1_10_SV39 != get_field(env->satp, SATP64_MODE)) {
+ return false;
+ }
+
+ /*
+ * Check hstatus.HUKTE if the effective mode is switched to VU-mode by
+ * executing HLV/HLVX/HSV in U-mode.
+ * For other cases, check senvcfg.UKTE.
+ */
+ if (env->priv == PRV_U && !env->virt_enabled && virt) {
+ if (!get_field(env->hstatus, HSTATUS_HUKTE)) {
+ return false;
+ }
+ } else if (!get_field(env->senvcfg, SENVCFG_UKTE)) {
+ return false;
+ }
+
+ /*
+ * Svukte extension is qualified only in U or VU-mode.
+ *
+ * Effective mode can be switched to U or VU-mode by:
+ * - M-mode + mstatus.MPRV=1 + mstatus.MPP=U-mode.
+ * - Execute HLV/HLVX/HSV from HS-mode + hstatus.SPVP=0.
+ * - U-mode.
+ * - VU-mode.
+ * - Execute HLV/HLVX/HSV from U-mode + hstatus.HU=1.
+ */
+ if (mode != PRV_U) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_svukte_addr(CPURISCVState *env, vaddr addr)
+{
+ /* svukte extension excludes RV32 */
+ uint32_t sxlen = 32 * riscv_cpu_sxl(env);
+ uint64_t high_bit = addr & (1UL << (sxlen - 1));
+ return !high_bit;
+}
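
For illustration (addresses chosen arbitrarily, not part of this patch): on an RV64 hart sxlen is 64, so the check simply rejects any address with bit 63 set, i.e. the upper half of the virtual address space:

    /* assumes 'env' is an RV64 CPURISCVState with Svukte active */
    check_svukte_addr(env, 0x0000003fffffe000ULL);   /* true:  bit 63 clear */
    check_svukte_addr(env, 0xffffffc000000000ULL);   /* false: bit 63 set   */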
+
/*
* get_physical_address - get the physical address for this virtual address
*
@@ -801,7 +1189,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
target_ulong *fault_pte_addr,
int access_type, int mmu_idx,
bool first_stage, bool two_stage,
- bool is_debug)
+ bool is_debug, bool is_probe)
{
/*
* NOTE: the env->pc value visible here will not be
@@ -811,10 +1199,18 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
MemTxResult res;
MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int mode = mmuidx_priv(mmu_idx);
+ bool virt = mmuidx_2stage(mmu_idx);
bool use_background = false;
hwaddr ppn;
int napot_bits = 0;
target_ulong napot_mask;
+ bool is_sstack_idx = ((mmu_idx & MMU_IDX_SS_WRITE) == MMU_IDX_SS_WRITE);
+ bool sstack_page = false;
+
+ if (do_svukte_check(env, first_stage, mode, virt) &&
+ !check_svukte_addr(env, addr)) {
+ return TRANSLATE_FAIL;
+ }
/*
* Check if we should use the background registers for the two
@@ -887,12 +1283,14 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
CPUState *cs = env_cpu(env);
int va_bits = PGSHIFT + levels * ptidxbits + widened;
+ int sxlen = 16 << riscv_cpu_sxl(env);
+ int sxlen_bytes = sxlen / 8;
if (first_stage == true) {
target_ulong mask, masked_msbs;
- if (TARGET_LONG_BITS > (va_bits - 1)) {
- mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
+ if (sxlen > (va_bits - 1)) {
+ mask = (1L << (sxlen - (va_bits - 1))) - 1;
} else {
mask = 0;
}
@@ -922,9 +1320,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
hwaddr pte_addr;
int i;
-#if !TCG_OVERSIZED_GUEST
-restart:
-#endif
+ restart:
for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
target_ulong idx;
if (i == 0) {
@@ -945,7 +1341,7 @@ restart:
int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
base, NULL, MMU_DATA_LOAD,
MMUIdx_U, false, true,
- is_debug);
+ is_debug, false);
if (vbase_ret != TRANSLATE_SUCCESS) {
if (fault_pte_addr) {
@@ -961,7 +1357,7 @@ restart:
int pmp_prot;
int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
- sizeof(target_ulong),
+ sxlen_bytes,
MMU_DATA_LOAD, PRV_S);
if (pmp_ret != TRANSLATE_SUCCESS) {
return TRANSLATE_PMP_FAIL;
@@ -981,14 +1377,27 @@ restart:
ppn = pte >> PTE_PPN_SHIFT;
} else {
if (pte & PTE_RESERVED) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: reserved bits set in PTE: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
if (!pbmte && (pte & PTE_PBMT)) {
+ /* Reserved without Svpbmt. */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
+ "and Svpbmt extension is disabled: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
if (!riscv_cpu_cfg(env)->ext_svnapot && (pte & PTE_N)) {
+ /* Reserved without Svnapot extension */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: N bit set in PTE, "
+ "and Svnapot extension is disabled: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
@@ -999,14 +1408,19 @@ restart:
/* Invalid PTE */
return TRANSLATE_FAIL;
}
+
if (pte & (PTE_R | PTE_W | PTE_X)) {
goto leaf;
}
- /* Inner PTE, continue walking */
if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
+ /* D, A, and U bits are reserved in non-leaf/inner PTEs */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: D, A, or U bits set in non-leaf PTE: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
+ /* Inner PTE, continue walking */
base = ppn << PGSHIFT;
}
@@ -1016,28 +1430,57 @@ restart:
leaf:
if (ppn & ((1ULL << ptshift) - 1)) {
/* Misaligned PPN */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PPN bits in PTE is misaligned: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
if (!pbmte && (pte & PTE_PBMT)) {
/* Reserved without Svpbmt. */
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: PBMT bits set in PTE, "
+ "and Svpbmt extension is disabled: "
+ "addr: 0x%" HWADDR_PRIx " pte: 0x" TARGET_FMT_lx "\n",
+ __func__, pte_addr, pte);
return TRANSLATE_FAIL;
}
+ target_ulong rwx = pte & (PTE_R | PTE_W | PTE_X);
/* Check for reserved combinations of RWX flags. */
- switch (pte & (PTE_R | PTE_W | PTE_X)) {
- case PTE_W:
+ switch (rwx) {
case PTE_W | PTE_X:
return TRANSLATE_FAIL;
+ case PTE_W:
+ /* if bcfi enabled, PTE_W is not reserved and shadow stack page */
+ if (cpu_get_bcfien(env) && first_stage) {
+ sstack_page = true;
+ /*
+ * if ss index, read and write allowed. else if not a probe
+ * then only read allowed
+ */
+ rwx = is_sstack_idx ? (PTE_R | PTE_W) : (is_probe ? 0 : PTE_R);
+ break;
+ }
+ return TRANSLATE_FAIL;
+ case PTE_R:
+ /*
+ * No matter what the `access_type` is, shadow stack accesses to read-only
+ * memory are always store page faults. During unwind, loads will be
+ * promoted to store faults.
+ */
+ if (is_sstack_idx) {
+ return TRANSLATE_FAIL;
+ }
+ break;
}
int prot = 0;
- if (pte & PTE_R) {
+ if (rwx & PTE_R) {
prot |= PAGE_READ;
}
- if (pte & PTE_W) {
+ if (rwx & PTE_W) {
prot |= PAGE_WRITE;
}
- if (pte & PTE_X) {
+ if (rwx & PTE_X) {
bool mxr = false;
/*
@@ -1081,8 +1524,11 @@ restart:
}
if (!((prot >> access_type) & 1)) {
- /* Access check failed */
- return TRANSLATE_FAIL;
+ /*
+ * Access check failed. Access check failures for the shadow stack are
+ * access faults.
+ */
+ return sstack_page ? TRANSLATE_PMP_FAIL : TRANSLATE_FAIL;
}
target_ulong updated_pte = pte;
@@ -1113,24 +1559,23 @@ restart:
* it is no longer valid and we must re-walk the page table.
*/
MemoryRegion *mr;
- hwaddr l = sizeof(target_ulong), addr1;
+ hwaddr l = sxlen_bytes, addr1;
mr = address_space_translate(cs->as, pte_addr, &addr1, &l,
false, MEMTXATTRS_UNSPECIFIED);
if (memory_region_is_ram(mr)) {
target_ulong *pte_pa = qemu_map_ram_ptr(mr->ram_block, addr1);
-#if TCG_OVERSIZED_GUEST
- /*
- * MTTCG is not enabled on oversized TCG guests so
- * page table updates do not need to be atomic
- */
- *pte_pa = pte = updated_pte;
-#else
- target_ulong old_pte = qatomic_cmpxchg(pte_pa, pte, updated_pte);
+ target_ulong old_pte;
+ if (riscv_cpu_sxl(env) == MXL_RV32) {
+ old_pte = qatomic_cmpxchg((uint32_t *)pte_pa, cpu_to_le32(pte), cpu_to_le32(updated_pte));
+ old_pte = le32_to_cpu(old_pte);
+ } else {
+ old_pte = qatomic_cmpxchg(pte_pa, cpu_to_le64(pte), cpu_to_le64(updated_pte));
+ old_pte = le64_to_cpu(old_pte);
+ }
if (old_pte != pte) {
goto restart;
}
pte = updated_pte;
-#endif
} else {
/*
* Misconfigured PTE in ROM (AD bits are not preset) or
@@ -1220,13 +1665,13 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
int mmu_idx = riscv_env_mmu_index(&cpu->env, false);
if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
- true, env->virt_enabled, true)) {
+ true, env->virt_enabled, true, false)) {
return -1;
}
if (env->virt_enabled) {
if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
- 0, MMUIdx_U, false, true, true)) {
+ 0, MMUIdx_U, false, true, true, false)) {
return -1;
}
}
@@ -1269,9 +1714,17 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
break;
case MMU_DATA_LOAD:
cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
+ /* shadow stack misaligned accesses are access faults */
+ if (mmu_idx & MMU_IDX_SS_WRITE) {
+ cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
+ }
break;
case MMU_DATA_STORE:
cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
+ /* shadow stack misaligned accesses are access faults */
+ if (mmu_idx & MMU_IDX_SS_WRITE) {
+ cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+ }
break;
default:
g_assert_not_reached();
@@ -1320,7 +1773,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
int ret = TRANSLATE_FAIL;
int mode = mmuidx_priv(mmu_idx);
/* default TLB page size */
- target_ulong tlb_size = TARGET_PAGE_SIZE;
+ hwaddr tlb_size = TARGET_PAGE_SIZE;
env->guest_phys_fault_addr = 0;
@@ -1332,7 +1785,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
/* Two stage lookup */
ret = get_physical_address(env, &pa, &prot, address,
&env->guest_phys_fault_addr, access_type,
- mmu_idx, true, true, false);
+ mmu_idx, true, true, false, probe);
/*
* A G-stage exception may be triggered during two state lookup.
@@ -1355,7 +1808,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
access_type, MMUIdx_U, false, true,
- false);
+ false, probe);
qemu_log_mask(CPU_LOG_MMU,
"%s 2nd-stage address=%" VADDR_PRIx
@@ -1372,7 +1825,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
qemu_log_mask(CPU_LOG_MMU,
"%s PMP address=" HWADDR_FMT_plx " ret %d prot"
- " %d tlb_size " TARGET_FMT_lu "\n",
+ " %d tlb_size %" HWADDR_PRIu "\n",
__func__, pa, ret, prot_pmp, tlb_size);
prot &= prot_pmp;
@@ -1392,7 +1845,8 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
} else {
/* Single stage lookup */
ret = get_physical_address(env, &pa, &prot, address, NULL,
- access_type, mmu_idx, true, false, false);
+ access_type, mmu_idx, true, false, false,
+ probe);
qemu_log_mask(CPU_LOG_MMU,
"%s address=%" VADDR_PRIx " ret %d physical "
@@ -1406,7 +1860,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
qemu_log_mask(CPU_LOG_MMU,
"%s PMP address=" HWADDR_FMT_plx " ret %d prot"
- " %d tlb_size " TARGET_FMT_lu "\n",
+ " %d tlb_size %" HWADDR_PRIu "\n",
__func__, pa, ret, prot_pmp, tlb_size);
prot &= prot_pmp;
@@ -1424,6 +1878,23 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
} else if (probe) {
return false;
} else {
+ int wp_access = 0;
+
+ if (access_type == MMU_DATA_LOAD) {
+ wp_access |= BP_MEM_READ;
+ } else if (access_type == MMU_DATA_STORE) {
+ wp_access |= BP_MEM_WRITE;
+ }
+
+ /*
+ * If a watchpoint isn't found for 'addr' this will
+ * be a no-op and we'll resume the mmu_exception path.
+ * Otherwise we'll throw a debug exception and execution
+ * will continue elsewhere.
+ */
+ cpu_check_watchpoint(cs, address, size, MEMTXATTRS_UNSPECIFIED,
+ wp_access, retaddr);
+
raise_mmu_exception(env, address, access_type, pmp_violation,
first_stage_error, two_stage_lookup,
two_stage_indirect_error);
@@ -1638,6 +2109,40 @@ static target_ulong riscv_transformed_insn(CPURISCVState *env,
return xinsn;
}
+static target_ulong promote_load_fault(target_ulong orig_cause)
+{
+ switch (orig_cause) {
+ case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
+ return RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
+
+ case RISCV_EXCP_LOAD_ACCESS_FAULT:
+ return RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+
+ case RISCV_EXCP_LOAD_PAGE_FAULT:
+ return RISCV_EXCP_STORE_PAGE_FAULT;
+ }
+
+ /* if no promotion, return original cause */
+ return orig_cause;
+}
+
+static void riscv_do_nmi(CPURISCVState *env, target_ulong cause, bool virt)
+{
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_NMIE, false);
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPV, virt);
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPP, env->priv);
+ env->mncause = cause;
+ env->mnepc = env->pc;
+ env->pc = env->rnmi_irqvec;
+
+ if (cpu_get_fcfien(env)) {
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP, env->elp);
+ }
+
+ /* Trapping to M mode, virt is disabled */
+ riscv_cpu_set_mode(env, PRV_M, false);
+}
+
/*
* Handle Traps
*
@@ -1648,8 +2153,12 @@ void riscv_cpu_do_interrupt(CPUState *cs)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
+ bool virt = env->virt_enabled;
bool write_gva = false;
+ bool always_storeamo = (env->excp_uw2 & RISCV_UW2_ALWAYS_STORE_AMO);
+ bool vsmode_exc;
uint64_t s;
+ int mode;
/*
* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
@@ -1658,22 +2167,38 @@ void riscv_cpu_do_interrupt(CPUState *cs)
bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
uint64_t deleg = async ? env->mideleg : env->medeleg;
- bool s_injected = env->mvip & (1 << cause) & env->mvien &&
- !(env->mip & (1 << cause));
- bool vs_injected = env->hvip & (1 << cause) & env->hvien &&
- !(env->mip & (1 << cause));
+ bool s_injected = env->mvip & (1ULL << cause) & env->mvien &&
+ !(env->mip & (1ULL << cause));
+ bool vs_injected = env->hvip & (1ULL << cause) & env->hvien &&
+ !(env->mip & (1ULL << cause));
+ bool smode_double_trap = false;
+ uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
+ const bool prev_virt = env->virt_enabled;
+ const target_ulong prev_priv = env->priv;
target_ulong tval = 0;
target_ulong tinst = 0;
target_ulong htval = 0;
target_ulong mtval2 = 0;
+ target_ulong src;
+ int sxlen = 0;
+ int mxlen = 16 << riscv_cpu_mxl(env);
+ bool nnmi_excep = false;
+
+ if (cpu->cfg.ext_smrnmi && env->rnmip && async) {
+ riscv_do_nmi(env, cause | ((target_ulong)1U << (mxlen - 1)),
+ env->virt_enabled);
+ return;
+ }
if (!async) {
/* set tval to badaddr for traps with address information */
switch (cause) {
+#ifdef CONFIG_TCG
case RISCV_EXCP_SEMIHOST:
do_common_semihosting(cs);
env->pc += 4;
return;
+#endif
case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
case RISCV_EXCP_LOAD_ADDR_MIS:
@@ -1682,6 +2207,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
case RISCV_EXCP_LOAD_PAGE_FAULT:
case RISCV_EXCP_STORE_PAGE_FAULT:
+ if (always_storeamo) {
+ cause = promote_load_fault(cause);
+ }
write_gva = env->two_stage_lookup;
tval = env->badaddr;
if (env->two_stage_indirect_lookup) {
@@ -1723,6 +2251,9 @@ void riscv_cpu_do_interrupt(CPUState *cs)
cs->watchpoint_hit = NULL;
}
break;
+ case RISCV_EXCP_SW_CHECK:
+ tval = env->sw_check_code;
+ break;
default:
break;
}
@@ -1751,14 +2282,44 @@ void riscv_cpu_do_interrupt(CPUState *cs)
__func__, env->mhartid, async, cause, env->pc, tval,
riscv_cpu_get_trap_name(cause, async));
- if (env->priv <= PRV_S && cause < 64 &&
- (((deleg >> cause) & 1) || s_injected || vs_injected)) {
- /* handle the trap in S-mode */
+ mode = env->priv <= PRV_S && cause < 64 &&
+ (((deleg >> cause) & 1) || s_injected || vs_injected) ? PRV_S : PRV_M;
+
+ vsmode_exc = env->virt_enabled && cause < 64 &&
+ (((hdeleg >> cause) & 1) || vs_injected);
+
+ /*
+ * Check double trap condition only if already in S-mode and targeting
+ * S-mode
+ */
+ if (cpu->cfg.ext_ssdbltrp && env->priv == PRV_S && mode == PRV_S) {
+ bool dte = (env->menvcfg & MENVCFG_DTE) != 0;
+ bool sdt = (env->mstatus & MSTATUS_SDT) != 0;
+ /* In VS or HS */
if (riscv_has_ext(env, RVH)) {
- uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
+ if (vsmode_exc) {
+ /* VS -> VS, use henvcfg instead of menvcfg */
+ dte = (env->henvcfg & HENVCFG_DTE) != 0;
+ } else if (env->virt_enabled) {
+ /* VS -> HS, use mstatus_hs */
+ sdt = (env->mstatus_hs & MSTATUS_SDT) != 0;
+ }
+ }
+ smode_double_trap = dte && sdt;
+ if (smode_double_trap) {
+ mode = PRV_M;
+ }
+ }
+
+ if (mode == PRV_S) {
+ /* handle the trap in S-mode */
+ /* save elp status */
+ if (cpu_get_fcfien(env)) {
+ env->mstatus = set_field(env->mstatus, MSTATUS_SPELP, env->elp);
+ }
- if (env->virt_enabled &&
- (((hdeleg >> cause) & 1) || vs_injected)) {
+ if (riscv_has_ext(env, RVH)) {
+ if (vsmode_exc) {
/* Trap to VS mode */
/*
* See if we need to adjust cause. Yes if its VS mode interrupt
@@ -1778,7 +2339,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
htval = env->guest_phys_fault_addr;
- riscv_cpu_set_virt_enabled(env, 0);
+ virt = false;
} else {
/* Trap into HS mode */
env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
@@ -1791,17 +2352,41 @@ void riscv_cpu_do_interrupt(CPUState *cs)
s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
s = set_field(s, MSTATUS_SPP, env->priv);
s = set_field(s, MSTATUS_SIE, 0);
+ if (riscv_env_smode_dbltrp_enabled(env, virt)) {
+ s = set_field(s, MSTATUS_SDT, 1);
+ }
env->mstatus = s;
- env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
+ sxlen = 16 << riscv_cpu_sxl(env);
+ env->scause = cause | ((target_ulong)async << (sxlen - 1));
env->sepc = env->pc;
env->stval = tval;
env->htval = htval;
env->htinst = tinst;
env->pc = (env->stvec >> 2 << 2) +
((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
- riscv_cpu_set_mode(env, PRV_S);
+ riscv_cpu_set_mode(env, PRV_S, virt);
+
+ src = env->sepc;
} else {
+ /*
+ * If the hart encounters an exception while executing in M-mode
+ * with the mnstatus.NMIE bit clear, the exception is an RNMI exception.
+ */
+ nnmi_excep = cpu->cfg.ext_smrnmi &&
+ !get_field(env->mnstatus, MNSTATUS_NMIE) &&
+ !async;
+
/* handle the trap in M-mode */
+ /* save elp status */
+ if (cpu_get_fcfien(env)) {
+ if (nnmi_excep) {
+ env->mnstatus = set_field(env->mnstatus, MNSTATUS_MNPELP,
+ env->elp);
+ } else {
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPELP, env->elp);
+ }
+ }
+
if (riscv_has_ext(env, RVH)) {
if (env->virt_enabled) {
riscv_cpu_swap_hypervisor_regs(env);
@@ -1815,25 +2400,80 @@ void riscv_cpu_do_interrupt(CPUState *cs)
mtval2 = env->guest_phys_fault_addr;
/* Trapping to M mode, virt is disabled */
- riscv_cpu_set_virt_enabled(env, 0);
+ virt = false;
}
+ /*
+ * If the hart encounters an exception while executing in M-mode,
+ * with the mnstatus.NMIE bit clear, the program counter is set to
+ * the RNMI exception trap handler address.
+ */
+ nnmi_excep = cpu->cfg.ext_smrnmi &&
+ !get_field(env->mnstatus, MNSTATUS_NMIE) &&
+ !async;
s = env->mstatus;
s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
s = set_field(s, MSTATUS_MPP, env->priv);
s = set_field(s, MSTATUS_MIE, 0);
+ if (cpu->cfg.ext_smdbltrp) {
+ if (env->mstatus & MSTATUS_MDT) {
+ assert(env->priv == PRV_M);
+ if (!cpu->cfg.ext_smrnmi || nnmi_excep) {
+ cpu_abort(CPU(cpu), "M-mode double trap\n");
+ } else {
+ riscv_do_nmi(env, cause, false);
+ return;
+ }
+ }
+
+ s = set_field(s, MSTATUS_MDT, 1);
+ }
env->mstatus = s;
- env->mcause = cause | ~(((target_ulong)-1) >> async);
+ env->mcause = cause | ((target_ulong)async << (mxlen - 1));
+ if (smode_double_trap) {
+ env->mtval2 = env->mcause;
+ env->mcause = RISCV_EXCP_DOUBLE_TRAP;
+ } else {
+ env->mtval2 = mtval2;
+ }
env->mepc = env->pc;
env->mtval = tval;
- env->mtval2 = mtval2;
env->mtinst = tinst;
- env->pc = (env->mtvec >> 2 << 2) +
- ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
- riscv_cpu_set_mode(env, PRV_M);
+
+ /*
+ * For RNMI exception, program counter is set to the RNMI exception
+ * trap handler address.
+ */
+ if (nnmi_excep) {
+ env->pc = env->rnmi_excpvec;
+ } else {
+ env->pc = (env->mtvec >> 2 << 2) +
+ ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
+ }
+ riscv_cpu_set_mode(env, PRV_M, virt);
+ src = env->mepc;
+ }
+
+ if (riscv_cpu_cfg(env)->ext_smctr || riscv_cpu_cfg(env)->ext_ssctr) {
+ if (async && cause == IRQ_PMU_OVF) {
+ riscv_ctr_freeze(env, XCTRCTL_LCOFIFRZ, virt);
+ } else if (!async && cause == RISCV_EXCP_BREAKPOINT) {
+ riscv_ctr_freeze(env, XCTRCTL_BPFRZ, virt);
+ }
+
+ riscv_ctr_add_entry(env, src, env->pc,
+ async ? CTRDATA_TYPE_INTERRUPT : CTRDATA_TYPE_EXCEPTION,
+ prev_priv, prev_virt);
}
/*
+ * Interrupt/exception/trap delivery is an asynchronous event and, as per
+ * the zicfilp spec, the CPU should clear up the ELP state. No harm in
+ * clearing unconditionally.
+ */
+ env->elp = false;
+
+ /*
* NOTE: it is not necessary to yield load reservations here. It is only
* necessary for an SC from "another hart" to cause a load reservation
* to be yielded. Refer to the memory consistency model section of the