aboutsummaryrefslogtreecommitdiff
path: root/linux-user
diff options
context:
space:
mode:
Diffstat (limited to 'linux-user')
-rw-r--r--linux-user/aarch64/cpu_loop.c5
-rw-r--r--linux-user/aarch64/elfload.c1
-rw-r--r--linux-user/aarch64/gcs-internal.h38
-rw-r--r--linux-user/aarch64/signal.c138
-rw-r--r--linux-user/aarch64/target_prctl.h96
-rw-r--r--linux-user/aarch64/target_signal.h1
-rw-r--r--linux-user/qemu.h5
-rw-r--r--linux-user/syscall.c114
8 files changed, 392 insertions, 6 deletions
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
index 50a4c99..7f66a87 100644
--- a/linux-user/aarch64/cpu_loop.c
+++ b/linux-user/aarch64/cpu_loop.c
@@ -89,6 +89,11 @@ static void signal_for_exception(CPUARMState *env, vaddr addr)
si_code = TARGET_ILL_ILLOPN;
break;
+ case EC_GCS:
+ si_signo = TARGET_SIGSEGV;
+ si_code = TARGET_SEGV_CPERR;
+ break;
+
case EC_MOP:
/*
* FIXME: The kernel fixes up wrong-option exceptions.
diff --git a/linux-user/aarch64/elfload.c b/linux-user/aarch64/elfload.c
index 77d03b5..3af5a37 100644
--- a/linux-user/aarch64/elfload.c
+++ b/linux-user/aarch64/elfload.c
@@ -169,6 +169,7 @@ abi_ulong get_elf_hwcap(CPUState *cs)
GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC);
GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC);
+ GET_FEATURE_ID(aa64_gcs, ARM_HWCAP_A64_GCS);
return hwcaps;
}
diff --git a/linux-user/aarch64/gcs-internal.h b/linux-user/aarch64/gcs-internal.h
new file mode 100644
index 0000000..e586c7e
--- /dev/null
+++ b/linux-user/aarch64/gcs-internal.h
@@ -0,0 +1,38 @@
+/*
+ * AArch64 gcs functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef AARCH64_GCS_INTERNAL_H
+#define AARCH64_GCS_INTERNAL_H
+
+#ifndef PR_SHADOW_STACK_ENABLE
+# define PR_SHADOW_STACK_ENABLE (1U << 0)
+# define PR_SHADOW_STACK_WRITE (1U << 1)
+# define PR_SHADOW_STACK_PUSH (1U << 2)
+#endif
+
+static inline uint64_t gcs_get_el0_mode(CPUArchState *env)
+{
+ uint64_t cr = env->cp15.gcscr_el[0];
+ abi_ulong flags = 0;
+
+ flags |= cr & GCSCR_PCRSEL ? PR_SHADOW_STACK_ENABLE : 0;
+ flags |= cr & GCSCR_STREN ? PR_SHADOW_STACK_WRITE : 0;
+ flags |= cr & GCSCR_PUSHMEN ? PR_SHADOW_STACK_PUSH : 0;
+
+ return flags;
+}
+
+static inline void gcs_set_el0_mode(CPUArchState *env, uint64_t flags)
+{
+ uint64_t cr = GCSCRE0_NTR;
+
+ cr |= flags & PR_SHADOW_STACK_ENABLE ? GCSCR_RVCHKEN | GCSCR_PCRSEL : 0;
+ cr |= flags & PR_SHADOW_STACK_WRITE ? GCSCR_STREN : 0;
+ cr |= flags & PR_SHADOW_STACK_PUSH ? GCSCR_PUSHMEN : 0;
+
+ env->cp15.gcscr_el[0] = cr;
+}
+
+#endif
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index ef97be3..f7edfa2 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -22,6 +22,7 @@
#include "signal-common.h"
#include "linux-user/trace.h"
#include "target/arm/cpu-features.h"
+#include "gcs-internal.h"
struct target_sigcontext {
uint64_t fault_address;
@@ -152,6 +153,16 @@ struct target_zt_context {
QEMU_BUILD_BUG_ON(TARGET_ZT_SIG_REG_BYTES != \
sizeof_field(CPUARMState, za_state.zt0));
+#define TARGET_GCS_MAGIC 0x47435300
+#define GCS_SIGNAL_CAP(X) ((X) & TARGET_PAGE_MASK)
+
+struct target_gcs_context {
+ struct target_aarch64_ctx head;
+ uint64_t gcspr;
+ uint64_t features_enabled;
+ uint64_t reserved;
+};
+
struct target_rt_sigframe {
struct target_siginfo info;
struct target_ucontext uc;
@@ -322,6 +333,35 @@ static void target_setup_zt_record(struct target_zt_context *zt,
}
}
+static bool target_setup_gcs_record(struct target_gcs_context *ctx,
+ CPUARMState *env, uint64_t return_addr)
+{
+ uint64_t mode = gcs_get_el0_mode(env);
+ uint64_t gcspr = env->cp15.gcspr_el[0];
+
+ if (mode & PR_SHADOW_STACK_ENABLE) {
+ /* Push a cap for the signal frame. */
+ gcspr -= 8;
+ if (put_user_u64(GCS_SIGNAL_CAP(gcspr), gcspr)) {
+ return false;
+ }
+
+ /* Push a gcs entry for the trampoline. */
+ if (put_user_u64(return_addr, gcspr - 8)) {
+ return false;
+ }
+ env->cp15.gcspr_el[0] = gcspr - 8;
+ }
+
+ __put_user(TARGET_GCS_MAGIC, &ctx->head.magic);
+ __put_user(sizeof(*ctx), &ctx->head.size);
+ __put_user(gcspr, &ctx->gcspr);
+ __put_user(mode, &ctx->features_enabled);
+ __put_user(0, &ctx->reserved);
+
+ return true;
+}
+
static void target_restore_general_frame(CPUARMState *env,
struct target_rt_sigframe *sf)
{
@@ -502,6 +542,64 @@ static bool target_restore_zt_record(CPUARMState *env,
return true;
}
+static bool target_restore_gcs_record(CPUARMState *env,
+ struct target_gcs_context *ctx,
+ bool *rebuild_hflags)
+{
+ TaskState *ts = get_task_state(env_cpu(env));
+ uint64_t cur_mode = gcs_get_el0_mode(env);
+ uint64_t new_mode, gcspr;
+
+ __get_user(new_mode, &ctx->features_enabled);
+ __get_user(gcspr, &ctx->gcspr);
+
+ /*
+ * The kernel pushes the value through the hw register:
+ * write_sysreg_s(gcspr, SYS_GCSPR_EL0) in restore_gcs_context,
+ * then read_sysreg_s(SYS_GCSPR_EL0) in gcs_restore_signal.
+ * Since the bottom 3 bits are RES0, this can (CONSTRAINED UNPREDICTABLE)
+ * force align the value. Mirror the choice from gcspr_write().
+ */
+ gcspr &= ~7;
+
+ if (new_mode & ~(PR_SHADOW_STACK_ENABLE |
+ PR_SHADOW_STACK_WRITE |
+ PR_SHADOW_STACK_PUSH)) {
+ return false;
+ }
+ if ((new_mode ^ cur_mode) & ts->gcs_el0_locked) {
+ return false;
+ }
+ if (new_mode & ~cur_mode & PR_SHADOW_STACK_ENABLE) {
+ return false;
+ }
+
+ if (new_mode & PR_SHADOW_STACK_ENABLE) {
+ uint64_t cap;
+
+ /* Pop and clear the signal cap. */
+ if (get_user_u64(cap, gcspr)) {
+ return false;
+ }
+ if (cap != GCS_SIGNAL_CAP(gcspr)) {
+ return false;
+ }
+ if (put_user_u64(0, gcspr)) {
+ return false;
+ }
+ gcspr += 8;
+ } else {
+ new_mode = 0;
+ }
+
+ env->cp15.gcspr_el[0] = gcspr;
+ if (new_mode != cur_mode) {
+ *rebuild_hflags = true;
+ gcs_set_el0_mode(env, new_mode);
+ }
+ return true;
+}
+
static int target_restore_sigframe(CPUARMState *env,
struct target_rt_sigframe *sf)
{
@@ -511,8 +609,10 @@ static int target_restore_sigframe(CPUARMState *env,
struct target_za_context *za = NULL;
struct target_tpidr2_context *tpidr2 = NULL;
struct target_zt_context *zt = NULL;
+ struct target_gcs_context *gcs = NULL;
uint64_t extra_datap = 0;
bool used_extra = false;
+ bool rebuild_hflags = false;
int sve_size = 0;
int za_size = 0;
int zt_size = 0;
@@ -582,6 +682,15 @@ static int target_restore_sigframe(CPUARMState *env,
zt_size = size;
break;
+ case TARGET_GCS_MAGIC:
+ if (gcs
+ || size != sizeof(struct target_gcs_context)
+ || !cpu_isar_feature(aa64_gcs, env_archcpu(env))) {
+ goto err;
+ }
+ gcs = (struct target_gcs_context *)ctx;
+ break;
+
case TARGET_EXTRA_MAGIC:
if (extra || size != sizeof(struct target_extra_context)) {
goto err;
@@ -612,6 +721,10 @@ static int target_restore_sigframe(CPUARMState *env,
goto err;
}
+ if (gcs && !target_restore_gcs_record(env, gcs, &rebuild_hflags)) {
+ goto err;
+ }
+
/* SVE data, if present, overwrites FPSIMD data. */
if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
goto err;
@@ -631,6 +744,9 @@ static int target_restore_sigframe(CPUARMState *env,
}
if (env->svcr != svcr) {
env->svcr = svcr;
+ rebuild_hflags = true;
+ }
+ if (rebuild_hflags) {
arm_rebuild_hflags(env);
}
unlock_user(extra, extra_datap, 0);
@@ -701,7 +817,7 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
uc.tuc_mcontext.__reserved),
};
int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0, tpidr2_ofs = 0;
- int zt_ofs = 0, esr_ofs = 0;
+ int zt_ofs = 0, esr_ofs = 0, gcs_ofs = 0;
int sve_size = 0, za_size = 0, tpidr2_size = 0, zt_size = 0;
struct target_rt_sigframe *frame;
struct target_rt_frame_record *fr;
@@ -720,6 +836,11 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
&layout);
}
+ if (env->cp15.gcspr_el[0]) {
+ gcs_ofs = alloc_sigframe_space(sizeof(struct target_gcs_context),
+ &layout);
+ }
+
/* SVE state needs saving only if it exists. */
if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
cpu_isar_feature(aa64_sme, env_archcpu(env))) {
@@ -779,6 +900,12 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
goto give_sigsegv;
}
+ if (ka->sa_flags & TARGET_SA_RESTORER) {
+ return_addr = ka->sa_restorer;
+ } else {
+ return_addr = default_rt_sigreturn;
+ }
+
target_setup_general_frame(frame, env, set);
target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
if (esr_ofs) {
@@ -786,6 +913,10 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
/* Leave ESR_EL1 clear while it's not relevant. */
env->cp15.esr_el[1] = 0;
}
+ if (gcs_ofs &&
+ !target_setup_gcs_record((void *)frame + gcs_ofs, env, return_addr)) {
+ goto give_sigsegv;
+ }
target_setup_end_record((void *)frame + layout.std_end_ofs);
if (layout.extra_ofs) {
target_setup_extra_record((void *)frame + layout.extra_ofs,
@@ -811,11 +942,6 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
__put_user(env->xregs[29], &fr->fp);
__put_user(env->xregs[30], &fr->lr);
- if (ka->sa_flags & TARGET_SA_RESTORER) {
- return_addr = ka->sa_restorer;
- } else {
- return_addr = default_rt_sigreturn;
- }
env->xregs[0] = usig;
env->xregs[29] = frame_addr + fr_ofs;
env->xregs[30] = return_addr;
diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
index ed75b9e..621be57 100644
--- a/linux-user/aarch64/target_prctl.h
+++ b/linux-user/aarch64/target_prctl.h
@@ -6,8 +6,10 @@
#ifndef AARCH64_TARGET_PRCTL_H
#define AARCH64_TARGET_PRCTL_H
+#include "qemu/units.h"
#include "target/arm/cpu-features.h"
#include "mte_user_helper.h"
+#include "gcs-internal.h"
static abi_long do_prctl_sve_get_vl(CPUArchState *env)
{
@@ -206,4 +208,98 @@ static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env)
}
#define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl
+static abi_long do_prctl_get_shadow_stack_status(CPUArchState *env,
+ abi_long arg2)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (!cpu_isar_feature(aa64_gcs, cpu)) {
+ return -TARGET_EINVAL;
+ }
+ return put_user_ual(gcs_get_el0_mode(env), arg2);
+}
+#define do_prctl_get_shadow_stack_status do_prctl_get_shadow_stack_status
+
+static abi_long gcs_alloc(abi_ulong hint, abi_ulong size)
+{
+ /*
+ * Without softmmu, we cannot protect GCS memory properly.
+ * Make do with normal read/write permissions. This at least allows
+ * emulation of correct programs which don't access the gcs stack
+ * with normal instructions.
+ */
+ return target_mmap(hint, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS |
+ (hint ? MAP_FIXED_NOREPLACE : 0), -1, 0);
+}
+
+static abi_ulong gcs_new_stack(TaskState *ts)
+{
+ /* Use guest_stack_size as a proxy for RLIMIT_STACK. */
+ abi_ulong size = MIN(MAX(guest_stack_size / 2, TARGET_PAGE_SIZE), 2 * GiB);
+ abi_ulong base = gcs_alloc(0, size);
+
+ if (base == -1) {
+ return -1;
+ }
+
+ ts->gcs_base = base;
+ ts->gcs_size = size;
+ return base + size - 8;
+}
+
+static abi_long do_prctl_set_shadow_stack_status(CPUArchState *env,
+ abi_long new_mode)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ TaskState *ts = get_task_state(env_cpu(env));
+ abi_long cur_mode;
+
+ if (!cpu_isar_feature(aa64_gcs, cpu)) {
+ return -TARGET_EINVAL;
+ }
+ if (new_mode & ~(PR_SHADOW_STACK_ENABLE |
+ PR_SHADOW_STACK_WRITE |
+ PR_SHADOW_STACK_PUSH)) {
+ return -TARGET_EINVAL;
+ }
+
+ cur_mode = gcs_get_el0_mode(env);
+ if ((new_mode ^ cur_mode) & ts->gcs_el0_locked) {
+ return -TARGET_EBUSY;
+ }
+
+ if (new_mode & ~cur_mode & PR_SHADOW_STACK_ENABLE) {
+ abi_long gcspr;
+
+ if (ts->gcs_base || env->cp15.gcspr_el[0]) {
+ return -TARGET_EINVAL;
+ }
+ gcspr = gcs_new_stack(ts);
+ if (gcspr == -1) {
+ return -TARGET_ENOMEM;
+ }
+ env->cp15.gcspr_el[0] = gcspr;
+ }
+
+ gcs_set_el0_mode(env, new_mode);
+ arm_rebuild_hflags(env);
+ return 0;
+}
+#define do_prctl_set_shadow_stack_status do_prctl_set_shadow_stack_status
+
+static abi_long do_prctl_lock_shadow_stack_status(CPUArchState *env,
+ abi_long arg2)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ TaskState *ts = get_task_state(env_cpu(env));
+
+ if (!cpu_isar_feature(aa64_gcs, cpu)) {
+ return -TARGET_EINVAL;
+ }
+ ts->gcs_el0_locked |= arg2;
+ return 0;
+}
+#define do_prctl_lock_shadow_stack_status do_prctl_lock_shadow_stack_status
+
#endif /* AARCH64_TARGET_PRCTL_H */
diff --git a/linux-user/aarch64/target_signal.h b/linux-user/aarch64/target_signal.h
index 6f66a50..e509ac1 100644
--- a/linux-user/aarch64/target_signal.h
+++ b/linux-user/aarch64/target_signal.h
@@ -7,6 +7,7 @@
#define TARGET_SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
#define TARGET_SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
+#define TARGET_SEGV_CPERR 10 /* Control protection fault */
#define TARGET_ARCH_HAS_SETUP_FRAME
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index cabb7bd..85e68ef 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -122,6 +122,11 @@ struct TaskState {
#ifdef TARGET_M68K
abi_ulong tp_value;
#endif
+#if defined(TARGET_AARCH64)
+ vaddr gcs_base;
+ abi_ulong gcs_size;
+ abi_ulong gcs_el0_locked;
+#endif
int used; /* non zero if used */
struct image_info *info;
struct linux_binprm *bprm;
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index d78b202..8546f48 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -6353,6 +6353,17 @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
# define PR_SME_VL_LEN_MASK 0xffff
# define PR_SME_VL_INHERIT (1 << 17)
#endif
+#ifndef PR_GET_SHADOW_STACK_STATUS
+# define PR_GET_SHADOW_STACK_STATUS 74
+# define PR_SET_SHADOW_STACK_STATUS 75
+# define PR_LOCK_SHADOW_STACK_STATUS 76
+#endif
+#ifndef SHADOW_STACK_SET_TOKEN
+# define SHADOW_STACK_SET_TOKEN (1u << 0)
+#endif
+#ifndef SHADOW_STACK_SET_MARKER
+# define SHADOW_STACK_SET_MARKER (1u << 1)
+#endif
#include "target_prctl.h"
@@ -6399,6 +6410,15 @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
#ifndef do_prctl_sme_set_vl
#define do_prctl_sme_set_vl do_prctl_inval1
#endif
+#ifndef do_prctl_get_shadow_stack_status
+#define do_prctl_get_shadow_stack_status do_prctl_inval1
+#endif
+#ifndef do_prctl_set_shadow_stack_status
+#define do_prctl_set_shadow_stack_status do_prctl_inval1
+#endif
+#ifndef do_prctl_lock_shadow_stack_status
+#define do_prctl_lock_shadow_stack_status do_prctl_inval1
+#endif
static abi_long do_prctl_syscall_user_dispatch(CPUArchState *env,
abi_ulong arg2, abi_ulong arg3,
@@ -6499,6 +6519,21 @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
return -TARGET_EINVAL;
}
return do_prctl_get_tagged_addr_ctrl(env);
+ case PR_GET_SHADOW_STACK_STATUS:
+ if (arg3 || arg4 || arg5) {
+ return -TARGET_EINVAL;
+ }
+ return do_prctl_get_shadow_stack_status(env, arg2);
+ case PR_SET_SHADOW_STACK_STATUS:
+ if (arg3 || arg4 || arg5) {
+ return -TARGET_EINVAL;
+ }
+ return do_prctl_set_shadow_stack_status(env, arg2);
+ case PR_LOCK_SHADOW_STACK_STATUS:
+ if (arg3 || arg4 || arg5) {
+ return -TARGET_EINVAL;
+ }
+ return do_prctl_lock_shadow_stack_status(env, arg2);
case PR_GET_UNALIGN:
return do_prctl_get_unalign(env, arg2);
@@ -6576,6 +6611,54 @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
}
}
+#ifdef TARGET_AARCH64
+static abi_long do_map_shadow_stack(CPUArchState *env, abi_ulong addr,
+ abi_ulong size, abi_int flags)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ abi_ulong alloc_size;
+
+ if (!cpu_isar_feature(aa64_gcs, cpu)) {
+ return -TARGET_EOPNOTSUPP;
+ }
+ if (flags & ~(SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER)) {
+ return -TARGET_EINVAL;
+ }
+ if (addr & ~TARGET_PAGE_MASK) {
+ return -TARGET_EINVAL;
+ }
+ if (size == 8 || !QEMU_IS_ALIGNED(size, 8)) {
+ return -TARGET_EINVAL;
+ }
+
+ alloc_size = TARGET_PAGE_ALIGN(size);
+ if (alloc_size < size) {
+ return -TARGET_EOVERFLOW;
+ }
+
+ mmap_lock();
+ addr = gcs_alloc(addr, alloc_size);
+ if (addr != -1) {
+ if (flags & SHADOW_STACK_SET_TOKEN) {
+ abi_ptr cap_ptr = addr + size - 8;
+ uint64_t cap_val;
+
+ if (flags & SHADOW_STACK_SET_MARKER) {
+ /* Leave an extra empty frame at top-of-stack. */
+ cap_ptr -= 8;
+ }
+ cap_val = (cap_ptr & TARGET_PAGE_MASK) | 1;
+ if (put_user_u64(cap_val, cap_ptr)) {
+ /* Allocation succeeded above. */
+ g_assert_not_reached();
+ }
+ }
+ }
+ mmap_unlock();
+ return get_errno(addr);
+}
+#endif
+
#define NEW_STACK_SIZE 0x40000
@@ -6657,6 +6740,21 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
ts = g_new0(TaskState, 1);
init_task_state(ts);
+#ifdef TARGET_AARCH64
+ /*
+ * If GCS is enabled in the parent thread, it is also enabled
+ * in the child thread, but with a newly allocated stack.
+ */
+ abi_long new_gcspr = 0;
+ if (env->cp15.gcscr_el[0] & GCSCR_PCRSEL) {
+ new_gcspr = gcs_new_stack(ts);
+ if (new_gcspr == -1) {
+ g_free(ts);
+ return -TARGET_ENOMEM;
+ }
+ }
+#endif
+
/* Grab a mutex so that thread setup appears atomic. */
pthread_mutex_lock(&clone_lock);
@@ -6678,6 +6776,11 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
ts->info = parent_ts->info;
ts->signal_mask = parent_ts->signal_mask;
+#ifdef TARGET_AARCH64
+ ts->gcs_el0_locked = parent_ts->gcs_el0_locked;
+ new_env->cp15.gcspr_el[0] = new_gcspr;
+#endif
+
if (flags & CLONE_CHILD_CLEARTID) {
ts->child_tidptr = child_tidptr;
}
@@ -9380,6 +9483,12 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
}
+#ifdef TARGET_AARCH64
+ if (ts->gcs_base) {
+ target_munmap(ts->gcs_base, ts->gcs_size);
+ }
+#endif
+
object_unparent(OBJECT(cpu));
object_unref(OBJECT(cpu));
/*
@@ -14010,6 +14119,11 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
#endif
+#ifdef TARGET_AARCH64
+ case TARGET_NR_map_shadow_stack:
+ return do_map_shadow_stack(cpu_env, arg1, arg2, arg3);
+#endif
+
default:
qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
return -TARGET_ENOSYS;