Diffstat (limited to 'linux-user/aarch64/signal.c')
-rw-r--r--  linux-user/aarch64/signal.c  311
1 file changed, 301 insertions(+), 10 deletions(-)
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index bc7a138..f7edfa2 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -22,6 +22,7 @@
#include "signal-common.h"
#include "linux-user/trace.h"
#include "target/arm/cpu-features.h"
+#include "gcs-internal.h"
struct target_sigcontext {
uint64_t fault_address;
@@ -65,6 +66,13 @@ struct target_fpsimd_context {
uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};
+#define TARGET_ESR_MAGIC 0x45535201
+
+struct target_esr_context {
+ struct target_aarch64_ctx head;
+ uint64_t esr;
+};
+
#define TARGET_EXTRA_MAGIC 0x45585401
struct target_extra_context {
@@ -121,6 +129,40 @@ struct target_za_context {
#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
+#define TARGET_TPIDR2_MAGIC 0x54504902
+
+struct target_tpidr2_context {
+ struct target_aarch64_ctx head;
+ uint64_t tpidr2;
+};
+
+#define TARGET_ZT_MAGIC 0x5a544e01
+
+struct target_zt_context {
+ struct target_aarch64_ctx head;
+ uint16_t nregs;
+ uint16_t reserved[3];
+ /* ZTn register data immediately follows */
+};
+
+#define TARGET_ZT_SIG_REG_BYTES (512 / 8)
+#define TARGET_ZT_SIG_REGS_SIZE(n) (TARGET_ZT_SIG_REG_BYTES * (n))
+#define TARGET_ZT_SIG_CONTEXT_SIZE(n) (sizeof(struct target_zt_context) + \
+ TARGET_ZT_SIG_REGS_SIZE(n))
+#define TARGET_ZT_SIG_REGS_OFFSET sizeof(struct target_zt_context)
+QEMU_BUILD_BUG_ON(TARGET_ZT_SIG_REG_BYTES != \
+ sizeof_field(CPUARMState, za_state.zt0));
+
+#define TARGET_GCS_MAGIC 0x47435300
+#define GCS_SIGNAL_CAP(X) ((X) & TARGET_PAGE_MASK)
+
+struct target_gcs_context {
+ struct target_aarch64_ctx head;
+ uint64_t gcspr;
+ uint64_t features_enabled;
+ uint64_t reserved;
+};
+
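
[Aside, not part of the patch: each of the new records above begins with the same 8-byte target_aarch64_ctx header, so a guest-side consumer locates them by scanning the __reserved area of the mcontext for the magic values. A minimal sketch of that scan, using locally defined stand-ins for the header and magics (the kernel UAPI and QEMU use their own spellings) and ignoring the TARGET_EXTRA_MAGIC indirection:]

#include <stddef.h>
#include <stdint.h>

struct ctx_header {              /* assumed: mirrors target_aarch64_ctx */
    uint32_t magic;
    uint32_t size;
};

#define GCS_CTX_MAGIC    0x47435300u   /* TARGET_GCS_MAGIC */
#define TPIDR2_CTX_MAGIC 0x54504902u   /* TARGET_TPIDR2_MAGIC */

/*
 * Scan a __reserved area for a record with the given magic.
 * Returns NULL at the end marker (magic == 0) or on a malformed size.
 */
static void *find_record(void *reserved, size_t reserved_size, uint32_t magic)
{
    size_t offset = 0;

    while (offset + sizeof(struct ctx_header) <= reserved_size) {
        struct ctx_header *hdr = (struct ctx_header *)((char *)reserved + offset);

        if (hdr->magic == 0) {
            return NULL;                 /* end-of-records marker */
        }
        if (hdr->magic == magic) {
            return hdr;
        }
        if (hdr->size < sizeof(*hdr) || hdr->size > reserved_size - offset) {
            return NULL;                 /* malformed record */
        }
        offset += hdr->size;
    }
    return NULL;
}
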
struct target_rt_sigframe {
struct target_siginfo info;
struct target_ucontext uc;
@@ -177,6 +219,14 @@ static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd,
}
}
+static void target_setup_esr_record(struct target_esr_context *ctx,
+ CPUARMState *env)
+{
+ __put_user(TARGET_ESR_MAGIC, &ctx->head.magic);
+ __put_user(sizeof(*ctx), &ctx->head.size);
+ __put_user(env->cp15.esr_el[1], &ctx->esr);
+}
+
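
[Aside, illustration only: the saved esr value is the raw ESR_EL1 syndrome, so a handler that has located this record can classify the faulting instruction by its exception class in bits [31:26]. A hypothetical helper:]

#include <stdint.h>

/* ESR_ELx.EC lives in bits [31:26]; 0x24 is a data abort from EL0, for example. */
static unsigned int esr_exception_class(uint64_t esr)
{
    return (unsigned int)((esr >> 26) & 0x3f);
}
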
static void target_setup_extra_record(struct target_extra_context *extra,
uint64_t datap, uint32_t extra_size)
{
@@ -248,9 +298,68 @@ static void target_setup_za_record(struct target_za_context *za,
for (i = 0; i < vl; ++i) {
uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
for (j = 0; j < vq * 2; ++j) {
- __put_user_e(env->zarray[i].d[j], z + j, le);
+ __put_user_e(env->za_state.za[i].d[j], z + j, le);
+ }
+ }
+}
+
+static void target_setup_tpidr2_record(struct target_tpidr2_context *tpidr2,
+ CPUARMState *env)
+{
+ __put_user(TARGET_TPIDR2_MAGIC, &tpidr2->head.magic);
+ __put_user(sizeof(struct target_tpidr2_context), &tpidr2->head.size);
+ __put_user(env->cp15.tpidr2_el0, &tpidr2->tpidr2);
+}
+
+static void target_setup_zt_record(struct target_zt_context *zt,
+ CPUARMState *env, int size)
+{
+ uint64_t *z;
+
+ memset(zt, 0, sizeof(*zt));
+ __put_user(TARGET_ZT_MAGIC, &zt->head.magic);
+ __put_user(size, &zt->head.size);
+ /*
+ * The record format allows for multiple ZT regs, but
+ * currently there is only one, ZT0.
+ */
+ __put_user(1, &zt->nregs);
+ assert(size == TARGET_ZT_SIG_CONTEXT_SIZE(1));
+
+ /* ZT0 is the same byte-stream format as SVE regs and ZA */
+ z = (void *)zt + TARGET_ZT_SIG_REGS_OFFSET;
+ for (int i = 0; i < ARRAY_SIZE(env->za_state.zt0); i++) {
+ __put_user_e(env->za_state.zt0[i], z + i, le);
+ }
+}
+
+static bool target_setup_gcs_record(struct target_gcs_context *ctx,
+ CPUARMState *env, uint64_t return_addr)
+{
+ uint64_t mode = gcs_get_el0_mode(env);
+ uint64_t gcspr = env->cp15.gcspr_el[0];
+
+ if (mode & PR_SHADOW_STACK_ENABLE) {
+ /* Push a cap for the signal frame. */
+ gcspr -= 8;
+ if (put_user_u64(GCS_SIGNAL_CAP(gcspr), gcspr)) {
+ return false;
+ }
+
+ /* Push a gcs entry for the trampoline. */
+ if (put_user_u64(return_addr, gcspr - 8)) {
+ return false;
}
+ env->cp15.gcspr_el[0] = gcspr - 8;
}
+
+ __put_user(TARGET_GCS_MAGIC, &ctx->head.magic);
+ __put_user(sizeof(*ctx), &ctx->head.size);
+ __put_user(gcspr, &ctx->gcspr);
+ __put_user(mode, &ctx->features_enabled);
+ __put_user(0, &ctx->reserved);
+
+ return true;
}
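
[Aside, sketch, not QEMU code: when the shadow stack is enabled, the net effect of target_setup_gcs_record() is two 8-byte pushes, a cap token for the frame and a GCS entry for the sigreturn trampoline. The simulation below uses a local array in place of guest memory and a fixed 4 KiB page mask in place of TARGET_PAGE_MASK; all addresses are made up.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SIG_CAP(x)  ((x) & ~(uint64_t)0xfff)   /* stands in for GCS_SIGNAL_CAP */

int main(void)
{
    uint64_t gcs[8] = { 0 };                   /* stand-in for the guest GCS page */
    uint64_t base = 0x0000fffff7ff0000ull;     /* hypothetical page address */
    uint64_t gcspr = base + sizeof(gcs);       /* GCS grows towards lower addresses */
    uint64_t return_addr = 0x400800;           /* hypothetical trampoline address */

    /* Push a cap for the signal frame; this slot is what the record's gcspr names. */
    gcspr -= 8;
    gcs[(gcspr - base) / 8] = SIG_CAP(gcspr);

    /* Push a GCS entry for the trampoline; the live GCSPR_EL0 ends up here. */
    gcs[(gcspr - 8 - base) / 8] = return_addr;

    printf("record gcspr   = %#" PRIx64 " -> cap   %#" PRIx64 "\n",
           gcspr, gcs[(gcspr - base) / 8]);
    printf("live GCSPR_EL0 = %#" PRIx64 " -> entry %#" PRIx64 "\n",
           gcspr - 8, gcs[(gcspr - 8 - base) / 8]);
    return 0;
}
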
static void target_restore_general_frame(CPUARMState *env,
@@ -397,12 +506,100 @@ static bool target_restore_za_record(CPUARMState *env,
for (i = 0; i < vl; ++i) {
uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
for (j = 0; j < vq * 2; ++j) {
- __get_user_e(env->zarray[i].d[j], z + j, le);
+ __get_user_e(env->za_state.za[i].d[j], z + j, le);
}
}
return true;
}
+static void target_restore_tpidr2_record(CPUARMState *env,
+ struct target_tpidr2_context *tpidr2)
+{
+ __get_user(env->cp15.tpidr2_el0, &tpidr2->tpidr2);
+}
+
+static bool target_restore_zt_record(CPUARMState *env,
+ struct target_zt_context *zt, int size,
+ int svcr)
+{
+ uint16_t nregs;
+ uint64_t *z;
+
+ if (!(FIELD_EX64(svcr, SVCR, ZA))) {
+ return false;
+ }
+
+ __get_user(nregs, &zt->nregs);
+
+ if (nregs != 1) {
+ return false;
+ }
+
+ z = (void *)zt + TARGET_ZT_SIG_REGS_OFFSET;
+ for (int i = 0; i < ARRAY_SIZE(env->za_state.zt0); i++) {
+ __get_user_e(env->za_state.zt0[i], z + i, le);
+ }
+ return true;
+}
+
+static bool target_restore_gcs_record(CPUARMState *env,
+ struct target_gcs_context *ctx,
+ bool *rebuild_hflags)
+{
+ TaskState *ts = get_task_state(env_cpu(env));
+ uint64_t cur_mode = gcs_get_el0_mode(env);
+ uint64_t new_mode, gcspr;
+
+ __get_user(new_mode, &ctx->features_enabled);
+ __get_user(gcspr, &ctx->gcspr);
+
+ /*
+ * The kernel pushes the value through the hw register:
+ * write_sysreg_s(gcspr, SYS_GCSPR_EL0) in restore_gcs_context,
+ * then read_sysreg_s(SYS_GCSPR_EL0) in gcs_restore_signal.
+ * Since the bottom 3 bits are RES0, this can (CONSTRAINED UNPREDICTABLE)
+ * force align the value. Mirror the choice from gcspr_write().
+ */
+ gcspr &= ~7;
+
+ if (new_mode & ~(PR_SHADOW_STACK_ENABLE |
+ PR_SHADOW_STACK_WRITE |
+ PR_SHADOW_STACK_PUSH)) {
+ return false;
+ }
+ if ((new_mode ^ cur_mode) & ts->gcs_el0_locked) {
+ return false;
+ }
+ if (new_mode & ~cur_mode & PR_SHADOW_STACK_ENABLE) {
+ return false;
+ }
+
+ if (new_mode & PR_SHADOW_STACK_ENABLE) {
+ uint64_t cap;
+
+ /* Pop and clear the signal cap. */
+ if (get_user_u64(cap, gcspr)) {
+ return false;
+ }
+ if (cap != GCS_SIGNAL_CAP(gcspr)) {
+ return false;
+ }
+ if (put_user_u64(0, gcspr)) {
+ return false;
+ }
+ gcspr += 8;
+ } else {
+ new_mode = 0;
+ }
+
+ env->cp15.gcspr_el[0] = gcspr;
+ if (new_mode != cur_mode) {
+ *rebuild_hflags = true;
+ gcs_set_el0_mode(env, new_mode);
+ }
+ return true;
+}
+
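
[Aside, sketch: the check target_restore_gcs_record() applies is the inverse of the push shown earlier: read the token at the saved (re-aligned) gcspr, require it to equal GCS_SIGNAL_CAP(gcspr), then clear and pop it before re-enabling the shadow stack. A stand-alone mirror of that step, again with a local array in place of guest memory accessors and a 4 KiB mask:]

#include <stdbool.h>
#include <stdint.h>

#define SIG_CAP(x)  ((x) & ~(uint64_t)0xfff)   /* stands in for GCS_SIGNAL_CAP */

/* 'mem'/'base' stand in for get_user_u64()/put_user_u64() on guest memory. */
static bool pop_signal_cap(uint64_t *mem, uint64_t base, uint64_t *gcspr)
{
    uint64_t slot = (*gcspr - base) / 8;
    uint64_t cap = mem[slot];

    if (cap != SIG_CAP(*gcspr)) {
        return false;          /* not the cap this frame pushed: reject the frame */
    }
    mem[slot] = 0;             /* clear the consumed cap */
    *gcspr += 8;               /* pop it; execution resumes with this GCSPR_EL0 */
    return true;
}
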
static int target_restore_sigframe(CPUARMState *env,
struct target_rt_sigframe *sf)
{
@@ -410,10 +607,15 @@ static int target_restore_sigframe(CPUARMState *env,
struct target_fpsimd_context *fpsimd = NULL;
struct target_sve_context *sve = NULL;
struct target_za_context *za = NULL;
+ struct target_tpidr2_context *tpidr2 = NULL;
+ struct target_zt_context *zt = NULL;
+ struct target_gcs_context *gcs = NULL;
uint64_t extra_datap = 0;
bool used_extra = false;
+ bool rebuild_hflags = false;
int sve_size = 0;
int za_size = 0;
+ int zt_size = 0;
int svcr = 0;
target_restore_general_frame(env, sf);
@@ -444,6 +646,9 @@ static int target_restore_sigframe(CPUARMState *env,
fpsimd = (struct target_fpsimd_context *)ctx;
break;
+ case TARGET_ESR_MAGIC:
+ break; /* ignore */
+
case TARGET_SVE_MAGIC:
if (sve || size < sizeof(struct target_sve_context)) {
goto err;
@@ -460,6 +665,32 @@ static int target_restore_sigframe(CPUARMState *env,
za_size = size;
break;
+ case TARGET_TPIDR2_MAGIC:
+ if (tpidr2 || size != sizeof(struct target_tpidr2_context) ||
+ !cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ goto err;
+ }
+ tpidr2 = (struct target_tpidr2_context *)ctx;
+ break;
+
+ case TARGET_ZT_MAGIC:
+ if (zt || size != TARGET_ZT_SIG_CONTEXT_SIZE(1) ||
+ !cpu_isar_feature(aa64_sme2, env_archcpu(env))) {
+ goto err;
+ }
+ zt = (struct target_zt_context *)ctx;
+ zt_size = size;
+ break;
+
+ case TARGET_GCS_MAGIC:
+ if (gcs
+ || size != sizeof(struct target_gcs_context)
+ || !cpu_isar_feature(aa64_gcs, env_archcpu(env))) {
+ goto err;
+ }
+ gcs = (struct target_gcs_context *)ctx;
+ break;
+
case TARGET_EXTRA_MAGIC:
if (extra || size != sizeof(struct target_extra_context)) {
goto err;
@@ -490,6 +721,10 @@ static int target_restore_sigframe(CPUARMState *env,
goto err;
}
+ if (gcs && !target_restore_gcs_record(env, gcs, &rebuild_hflags)) {
+ goto err;
+ }
+
/* SVE data, if present, overwrites FPSIMD data. */
if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
goto err;
@@ -497,8 +732,21 @@ static int target_restore_sigframe(CPUARMState *env,
if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
goto err;
}
+ if (tpidr2) {
+ target_restore_tpidr2_record(env, tpidr2);
+ }
+ /*
+ * NB that we must restore ZT after ZA so the check that there's
+ * no ZT record if SVCR.ZA is 0 gets the right value of SVCR.
+ */
+ if (zt && !target_restore_zt_record(env, zt, zt_size, svcr)) {
+ goto err;
+ }
if (env->svcr != svcr) {
env->svcr = svcr;
+ rebuild_hflags = true;
+ }
+ if (rebuild_hflags) {
arm_rebuild_hflags(env);
}
unlock_user(extra, extra_datap, 0);
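
[Aside, illustration only, assuming the architectural SVCR layout with SM in bit 0 and ZA in bit 1: the ordering comment above matters because target_restore_zt_record() rejects a ZT record whenever SVCR.ZA is clear, and SVCR.ZA only becomes known once the ZA record has been processed.]

#include <assert.h>
#include <stdbool.h>

#define SVCR_ZA (1u << 1)      /* assumed bit position, as in FEAT_SME */

static bool zt_record_allowed(unsigned int svcr)
{
    return (svcr & SVCR_ZA) != 0;
}

int main(void)
{
    unsigned int svcr = 0;

    assert(!zt_record_allowed(svcr));   /* before the ZA record: ZT rejected */
    svcr |= SVCR_ZA;                    /* ZA record present and accepted */
    assert(zt_record_allowed(svcr));    /* now a ZT record is acceptable */
    return 0;
}
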
@@ -568,8 +816,9 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
.total_size = offsetof(struct target_rt_sigframe,
uc.tuc_mcontext.__reserved),
};
- int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0;
- int sve_size = 0, za_size = 0;
+ int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0, tpidr2_ofs = 0;
+ int zt_ofs = 0, esr_ofs = 0, gcs_ofs = 0;
+ int sve_size = 0, za_size = 0, tpidr2_size = 0, zt_size = 0;
struct target_rt_sigframe *frame;
struct target_rt_frame_record *fr;
abi_ulong frame_addr, return_addr;
@@ -578,6 +827,20 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
fpsimd_ofs = alloc_sigframe_space(sizeof(struct target_fpsimd_context),
&layout);
+ /*
+ * In user mode, ESR_EL1 is only set by cpu_loop while queueing the
+ * signal, and it's only valid for the one sync insn.
+ */
+ if (env->cp15.esr_el[1]) {
+ esr_ofs = alloc_sigframe_space(sizeof(struct target_esr_context),
+ &layout);
+ }
+
+ if (env->cp15.gcspr_el[0]) {
+ gcs_ofs = alloc_sigframe_space(sizeof(struct target_gcs_context),
+ &layout);
+ }
+
/* SVE state needs saving only if it exists. */
if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
cpu_isar_feature(aa64_sme, env_archcpu(env))) {
@@ -585,6 +848,8 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
sve_ofs = alloc_sigframe_space(sve_size, &layout);
}
if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ tpidr2_size = sizeof(struct target_tpidr2_context);
+ tpidr2_ofs = alloc_sigframe_space(tpidr2_size, &layout);
/* ZA state needs saving only if it is enabled. */
if (FIELD_EX64(env->svcr, SVCR, ZA)) {
za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env));
@@ -593,6 +858,12 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
}
za_ofs = alloc_sigframe_space(za_size, &layout);
}
+ if (cpu_isar_feature(aa64_sme2, env_archcpu(env)) &&
+ FIELD_EX64(env->svcr, SVCR, ZA)) {
+ /* If SME ZA storage is enabled, we must also save SME2 ZT0 */
+ zt_size = TARGET_ZT_SIG_CONTEXT_SIZE(1);
+ zt_ofs = alloc_sigframe_space(zt_size, &layout);
+ }
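
[Aside, sketch of the arithmetic, assuming the 8-byte two-word record header: the zt_size reserved above works out to a fixed 80 bytes for the single ZT0 register.]

#include <assert.h>
#include <stdint.h>

struct ctx_header { uint32_t magic, size; };            /* 8 bytes */
struct zt_header  { struct ctx_header head;             /* mirrors target_zt_context */
                    uint16_t nregs, reserved[3]; };     /* +8 bytes, no padding */

static_assert(sizeof(struct zt_header) == 16, "fixed part of the ZT record");
static_assert(sizeof(struct zt_header) + 512 / 8 == 80,
              "one 512-bit ZT0 register gives an 80-byte record");
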
if (layout.extra_ofs) {
/* Reserve space for the extra end marker. The standard end marker
@@ -629,8 +900,23 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
goto give_sigsegv;
}
+ if (ka->sa_flags & TARGET_SA_RESTORER) {
+ return_addr = ka->sa_restorer;
+ } else {
+ return_addr = default_rt_sigreturn;
+ }
+
target_setup_general_frame(frame, env, set);
target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
+ if (esr_ofs) {
+ target_setup_esr_record((void *)frame + esr_ofs, env);
+ /* Leave ESR_EL1 clear while it's not relevant. */
+ env->cp15.esr_el[1] = 0;
+ }
+ if (gcs_ofs &&
+ !target_setup_gcs_record((void *)frame + gcs_ofs, env, return_addr)) {
+ goto give_sigsegv;
+ }
target_setup_end_record((void *)frame + layout.std_end_ofs);
if (layout.extra_ofs) {
target_setup_extra_record((void *)frame + layout.extra_ofs,
@@ -644,17 +930,18 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
if (za_ofs) {
target_setup_za_record((void *)frame + za_ofs, env, za_size);
}
+ if (tpidr2_ofs) {
+ target_setup_tpidr2_record((void *)frame + tpidr2_ofs, env);
+ }
+ if (zt_ofs) {
+ target_setup_zt_record((void *)frame + zt_ofs, env, zt_size);
+ }
/* Set up the stack frame for unwinding. */
fr = (void *)frame + fr_ofs;
__put_user(env->xregs[29], &fr->fp);
__put_user(env->xregs[30], &fr->lr);
- if (ka->sa_flags & TARGET_SA_RESTORER) {
- return_addr = ka->sa_restorer;
- } else {
- return_addr = default_rt_sigreturn;
- }
env->xregs[0] = usig;
env->xregs[29] = frame_addr + fr_ofs;
env->xregs[30] = return_addr;
@@ -666,8 +953,12 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
env->btype = 2;
}
- /* Invoke the signal handler with both SM and ZA disabled. */
+ /*
+ * Invoke the signal handler with a clean SME state: both SM and ZA
+ * disabled and TPIDR2_EL0 cleared.
+ */
aarch64_set_svcr(env, 0, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);
+ env->cp15.tpidr2_el0 = 0;
if (info) {
frame->info = *info;