Diffstat (limited to 'target/arm/tcg/hflags.c')
 target/arm/tcg/hflags.c | 198 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 190 insertions(+), 8 deletions(-)
diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c
index 8d79b8b..5c9b9be 100644
--- a/target/arm/tcg/hflags.c
+++ b/target/arm/tcg/hflags.c
@@ -9,9 +9,13 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/helper-proto.h"
+#include "exec/translation-block.h"
+#include "accel/tcg/cpu-ops.h"
#include "cpregs.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
static inline bool fgt_svc(CPUARMState *env, int el)
{
/*
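
The HELPER_H define plus "exec/helper-proto.h.inc" include added above follow QEMU's per-target helper-header scheme: the generic .inc expands a prototype for every helper listed in the header named by HELPER_H. A minimal sketch of that x-macro pattern, using entirely hypothetical names (SKETCH_HELPERS, DEF_HELPER):

    /* sketch-helpers.h (hypothetical) lists each helper once:
     *     DEF_HELPER(rebuild_hflags_a64, void, int)
     * and a proto-generating include (also hypothetical) expands it: */
    #define SKETCH_HELPERS "sketch-helpers.h"
    #define DEF_HELPER(name, ret, arg) ret helper_##name(arg);
    #include SKETCH_HELPERS  /* yields: void helper_rebuild_hflags_a64(int); */
    #undef DEF_HELPER
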
@@ -210,6 +214,31 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
+/*
+ * Return the exception level to which exceptions should be taken for ZT0.
+ * Cf. the ARM pseudocode function CheckSMEZT0Enabled, after the ZA check.
+ */
+static int zt0_exception_el(CPUARMState *env, int el)
+{
+#ifndef CONFIG_USER_ONLY
+ if (el <= 1
+ && !el_is_in_host(env, el)
+ && !FIELD_EX64(env->vfp.smcr_el[1], SMCR, EZT0)) {
+ return 1;
+ }
+ if (el <= 2
+ && arm_is_el2_enabled(env)
+ && !FIELD_EX64(env->vfp.smcr_el[2], SMCR, EZT0)) {
+ return 2;
+ }
+ if (arm_feature(env, ARM_FEATURE_EL3)
+ && !FIELD_EX64(env->vfp.smcr_el[3], SMCR, EZT0)) {
+ return 3;
+ }
+#endif
+ return 0;
+}
+
static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
ARMMMUIdx mmu_idx)
{
@@ -229,6 +258,11 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
DP_TBFLAG_A64(flags, TBII, tbii);
DP_TBFLAG_A64(flags, TBID, tbid);
+ /* E2H is used by both VHE and NV2. */
+ if (hcr & HCR_E2H) {
+ DP_TBFLAG_A64(flags, E2H, 1);
+ }
+
if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
int sve_el = sve_exception_el(env, el);
@@ -265,7 +299,14 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
DP_TBFLAG_A64(flags, PSTATE_SM, 1);
DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
}
- DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
+
+ if (FIELD_EX64(env->svcr, SVCR, ZA)) {
+ DP_TBFLAG_A64(flags, PSTATE_ZA, 1);
+ if (cpu_isar_feature(aa64_sme2, env_archcpu(env))) {
+ int zt0_el = zt0_exception_el(env, el);
+ DP_TBFLAG_A64(flags, ZT0EXC_EL, zt0_el);
+ }
+ }
}
sctlr = regime_sctlr(env, stage1);
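
The DP_TBFLAG_* and EX_TBFLAG_* macros used throughout this hunk deposit and extract named bitfields within the two words of CPUARMTBFlags. A reduced sketch of the deposit/extract pattern, with a made-up field position and width (the real layout comes from the FIELD() definitions in cpu.h):

    #include <stdint.h>

    /* Made-up shift/length; illustrative only. */
    #define SKETCH_FIELD_SHIFT   4
    #define SKETCH_FIELD_LENGTH  2

    static uint64_t sketch_dp_flag(uint64_t flags, uint64_t val)
    {
        uint64_t mask = ((1ull << SKETCH_FIELD_LENGTH) - 1)
                        << SKETCH_FIELD_SHIFT;
        return (flags & ~mask) | ((val << SKETCH_FIELD_SHIFT) & mask);
    }

    static uint64_t sketch_ex_flag(uint64_t flags)
    {
        return (flags >> SKETCH_FIELD_SHIFT)
               & ((1ull << SKETCH_FIELD_LENGTH) - 1);
    }
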
@@ -354,9 +395,6 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
}
if (hcr & HCR_NV2) {
DP_TBFLAG_A64(flags, NV2, 1);
- if (hcr & HCR_E2H) {
- DP_TBFLAG_A64(flags, NV2_MEM_E20, 1);
- }
if (env->cp15.sctlr_el[2] & SCTLR_EE) {
DP_TBFLAG_A64(flags, NV2_MEM_BE, 1);
}
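
With E2H now recorded unconditionally (see the E2H hunk above), the dedicated NV2_MEM_E20 bit removed here carries no extra information; a consumer wanting the old combined condition can derive it from the two surviving flags, roughly (a sketch, not the actual consumer-side change):

    bool nv2_mem_e20 = EX_TBFLAG_A64(flags, NV2) && EX_TBFLAG_A64(flags, E2H);
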
@@ -413,6 +451,44 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx));
}
+ if (cpu_isar_feature(aa64_gcs, env_archcpu(env))) {
+ /* Cf. GCSEnabled */
+ if (env->cp15.gcscr_el[el] & GCSCR_PCRSEL) {
+ switch (el) {
+ default:
+ if (!el_is_in_host(env, el)
+ && !(arm_hcrx_el2_eff(env) & HCRX_GCSEN)) {
+ break;
+ }
+ /* fall through */
+ case 2:
+ if (arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_GCSEN)) {
+ break;
+ }
+ /* fall through */
+ case 3:
+ DP_TBFLAG_A64(flags, GCS_EN, 1);
+ break;
+ }
+ }
+
+ /* Cf. GCSReturnValueCheckEnabled */
+ if (env->cp15.gcscr_el[el] & GCSCR_RVCHKEN) {
+ DP_TBFLAG_A64(flags, GCS_RVCEN, 1);
+ }
+
+ /* Cf. CheckGCSSTREnabled */
+ if (!(env->cp15.gcscr_el[el] & GCSCR_STREN)) {
+ DP_TBFLAG_A64(flags, GCSSTR_EL, el ? el : 1);
+ } else if (el == 1
+ && EX_TBFLAG_ANY(flags, FGT_ACTIVE)
+ && !FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR],
+ HFGITR_EL2, NGCSSTR_EL1)) {
+ DP_TBFLAG_A64(flags, GCSSTR_EL, 2);
+ }
+ }
+
if (env->vfp.fpcr & FPCR_AH) {
DP_TBFLAG_A64(flags, AH, 1);
}
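
The fall-through switch implementing GCSEnabled above is compact but easy to misread: EL0/EL1 (unless running in the EL2 host regime) must pass the HCRX_EL2.GCSEN gate and then the SCR_EL3.GCSEN gate, EL2 passes only the latter, and EL3 neither. An equivalent straight-line restatement, with the system-register queries reduced to booleans (names invented for illustration):

    #include <stdbool.h>

    static bool gcs_enabled_sketch(int el, bool in_host, bool hcrx_gcsen,
                                   bool have_el3, bool scr_gcsen)
    {
        if (el < 2 && !in_host && !hcrx_gcsen) {
            return false;   /* gated by HCRX_EL2.GCSEN */
        }
        if (el < 3 && have_el3 && !scr_gcsen) {
            return false;   /* gated by SCR_EL3.GCSEN */
        }
        return true;        /* GCSCR_ELx.PCRSEL already checked by caller */
    }
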
@@ -498,7 +574,7 @@ void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}
-void assert_hflags_rebuild_correctly(CPUARMState *env)
+static void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
CPUARMTBFlags c = env->hflags;
@@ -506,10 +582,116 @@ void assert_hflags_rebuild_correctly(CPUARMState *env)
if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
fprintf(stderr, "TCG hflags mismatch "
- "(current:(0x%08x,0x" TARGET_FMT_lx ")"
- " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
+ "(current:(0x%08x,0x%016" PRIx64 ")"
+ " rebuilt:(0x%08x,0x%016" PRIx64 ")\n",
c.flags, c.flags2, r.flags, r.flags2);
abort();
}
#endif
}
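
The format-string fix above follows from flags2 being a fixed-width uint64_t rather than a target-sized integer: the removed TARGET_FMT_lx expands to the target register width and is the wrong conversion for a 64-bit field on 32-bit targets, while PRIx64 from <inttypes.h> is always correct for uint64_t. A minimal illustration:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t flags2 = 0x123456789abcdef0ull;
        /* PRIx64 expands to the right conversion specifier for
         * uint64_t on every host ABI. */
        printf("0x%016" PRIx64 "\n", flags2);
        return 0;
    }
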
+
+static bool mve_no_pred(CPUARMState *env)
+{
+ /*
+ * Return true if there is definitely no predication of MVE
+ * instructions by VPR or LTPSIZE. (Returning false even if there
+ * isn't any predication is OK; generated code will just be
+ * a little worse.)
+ * If the CPU does not implement MVE then this TB flag is always 0.
+ *
+ * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
+ * logic in gen_update_fp_context() needs to be updated to match.
+ *
+ * We do not include the effect of the ECI bits here -- they are
+ * tracked in other TB flags. This simplifies the logic for
+ * "when did we emit code that changes the MVE_NO_PRED TB flag
+ * and thus need to end the TB?".
+ */
+ if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
+ return false;
+ }
+ if (env->v7m.vpr) {
+ return false;
+ }
+ if (env->v7m.ltpsize < 4) {
+ return false;
+ }
+ return true;
+}
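
mve_no_pred() computes the MVE_NO_PRED TB flag set further down in arm_get_tb_cpu_state(); the payoff is at translate time, where a set flag lets the front end skip the predication checks entirely. A hypothetical consumer sketch (the comment above confirms a real s->mve_no_pred field, but the types and emit helpers here are invented):

    #include <stdbool.h>

    /* Invented stand-ins for the translator types and helpers. */
    typedef struct { bool mve_no_pred; } SketchDisasContext;
    static void emit_unpredicated_op(SketchDisasContext *s) { (void)s; }
    static void emit_predicated_op(SketchDisasContext *s) { (void)s; }

    static void sketch_emit_mve_op(SketchDisasContext *s)
    {
        if (s->mve_no_pred) {
            emit_unpredicated_op(s);  /* no VPR/LTPSIZE checks needed */
        } else {
            emit_predicated_op(s);    /* must honour beat-wise predication */
        }
    }
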
+
+TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs)
+{
+ CPUARMState *env = cpu_env(cs);
+ CPUARMTBFlags flags;
+ vaddr pc;
+
+ assert_hflags_rebuild_correctly(env);
+ flags = env->hflags;
+
+ if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
+ pc = env->pc;
+ if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
+ DP_TBFLAG_A64(flags, BTYPE, env->btype);
+ }
+ } else {
+ pc = env->regs[15];
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
+ != env->v7m.secure) {
+ DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
+ }
+
+ if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
+ (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
+ (env->v7m.secure &&
+ !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
+ /*
+ * ASPEN is set, but FPCA/SFPA indicate that there is no
+ * active FP context; we must create a new FP context before
+ * executing any FP insn.
+ */
+ DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
+ }
+
+ bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+ if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
+ DP_TBFLAG_M32(flags, LSPACT, 1);
+ }
+
+ if (mve_no_pred(env)) {
+ DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
+ }
+ } else {
+ /* Note that VECLEN+VECSTRIDE are RES0 for M-profile. */
+ DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
+ DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
+ if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
+ DP_TBFLAG_A32(flags, VFPEN, 1);
+ }
+ }
+
+ DP_TBFLAG_AM32(flags, THUMB, env->thumb);
+ DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
+ }
+
+ /*
+ * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
+ * states defined in the ARM ARM for software singlestep:
+ *   SS_ACTIVE   PSTATE.SS   State
+ *      0            x       Inactive (the TB flag for SS is always 0)
+ *      1            0       Active-pending
+ *      1            1       Active-not-pending
+ * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
+ */
+ if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
+ DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
+ }
+
+ return (TCGTBCPUState){
+ .pc = pc,
+ .flags = flags.flags,
+ .cs_base = flags.flags2,
+ };
+}
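
The single-step table in the comment above maps directly onto a three-state machine; restated as standalone code (names invented for illustration):

    #include <stdbool.h>

    typedef enum {
        SS_INACTIVE,            /* SS_ACTIVE == 0 */
        SS_ACTIVE_PENDING,      /* SS_ACTIVE == 1, PSTATE.SS == 0 */
        SS_ACTIVE_NOT_PENDING,  /* SS_ACTIVE == 1, PSTATE.SS == 1 */
    } SketchSSState;

    static SketchSSState sketch_ss_state(bool ss_active, bool pstate_ss)
    {
        if (!ss_active) {
            return SS_INACTIVE;
        }
        return pstate_ss ? SS_ACTIVE_NOT_PENDING : SS_ACTIVE_PENDING;
    }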