Diffstat (limited to 'linux-user/aarch64')
-rw-r--r--  linux-user/aarch64/cpu_loop.c        191
-rw-r--r--  linux-user/aarch64/elfload.c         381
-rw-r--r--  linux-user/aarch64/gcs-internal.h     38
-rw-r--r--  linux-user/aarch64/signal.c          311
-rw-r--r--  linux-user/aarch64/target_elf.h       30
-rw-r--r--  linux-user/aarch64/target_prctl.h     96
-rw-r--r--  linux-user/aarch64/target_ptrace.h    14
-rw-r--r--  linux-user/aarch64/target_signal.h     1
-rw-r--r--  linux-user/aarch64/target_syscall.h    7
-rwxr-xr-x  linux-user/aarch64/vdso-be.so        bin 3224 -> 3320 bytes
-rwxr-xr-x  linux-user/aarch64/vdso-le.so        bin 3224 -> 3320 bytes
-rw-r--r--  linux-user/aarch64/vdso.S              2
12 files changed, 995 insertions, 76 deletions
diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c
index fea43ce..7f66a87 100644
--- a/linux-user/aarch64/cpu_loop.c
+++ b/linux-user/aarch64/cpu_loop.c
@@ -27,18 +27,144 @@
#include "target/arm/syndrome.h"
#include "target/arm/cpu-features.h"
+/* Use the exception syndrome to map a cpu exception to a signal. */
+static void signal_for_exception(CPUARMState *env, vaddr addr)
+{
+ uint32_t syn = env->exception.syndrome;
+ int si_code, si_signo;
+
+ /* Let signal delivery see that ESR is live. */
+ env->cp15.esr_el[1] = syn;
+
+ switch (syn_get_ec(syn)) {
+ case EC_DATAABORT:
+ case EC_INSNABORT:
+ /* Both EC have the same format for FSC, or close enough. */
+ switch (extract32(syn, 0, 6)) {
+ case 0x04 ... 0x07: /* Translation fault, level {0-3} */
+ si_signo = TARGET_SIGSEGV;
+ si_code = TARGET_SEGV_MAPERR;
+ break;
+ case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */
+ case 0x0d ... 0x0f: /* Permission fault, level {1-3} */
+ si_signo = TARGET_SIGSEGV;
+ si_code = TARGET_SEGV_ACCERR;
+ break;
+ case 0x11: /* Synchronous Tag Check Fault */
+ si_signo = TARGET_SIGSEGV;
+ si_code = TARGET_SEGV_MTESERR;
+ break;
+ case 0x21: /* Alignment fault */
+ si_signo = TARGET_SIGBUS;
+ si_code = TARGET_BUS_ADRALN;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+
+ case EC_PCALIGNMENT:
+ si_signo = TARGET_SIGBUS;
+ si_code = TARGET_BUS_ADRALN;
+ break;
+
+ case EC_UNCATEGORIZED: /* E.g. undefined instruction */
+ case EC_SYSTEMREGISTERTRAP: /* E.g. inaccessible register */
+ case EC_SMETRAP: /* E.g. invalid insn in streaming state */
+ case EC_BTITRAP: /* E.g. invalid guarded branch target */
+ case EC_ILLEGALSTATE:
+ /*
+ * Illegal state happens via an ERET from a privileged mode,
+ * so is not normally possible from user-only. However, gdbstub
+ * is not prevented from writing CPSR_IL, aka PSTATE.IL, which
+ * would generate a trap from the next translated block.
+ * In the kernel, default case -> el0_inv -> bad_el0_sync.
+ */
+ si_signo = TARGET_SIGILL;
+ si_code = TARGET_ILL_ILLOPC;
+ break;
+
+ case EC_PACFAIL:
+ si_signo = TARGET_SIGILL;
+ si_code = TARGET_ILL_ILLOPN;
+ break;
+
+ case EC_GCS:
+ si_signo = TARGET_SIGSEGV;
+ si_code = TARGET_SEGV_CPERR;
+ break;
+
+ case EC_MOP:
+ /*
+ * FIXME: The kernel fixes up wrong-option exceptions.
+ * For QEMU linux-user mode, you can only get these if
+ * the process is doing something silly (not executing
+ * the MOPS instructions in the required P/M/E sequence),
+ * so it is not a problem in practice that we do not.
+ *
+ * We ought ideally to implement the same "rewind to the
+ * start of the sequence" logic that the kernel does in
+ * arm64_mops_reset_regs(). In the meantime, deliver
+ * the guest a SIGILL, with the same ILLOPN si_code
+ * we've always used for this.
+ */
+ si_signo = TARGET_SIGILL;
+ si_code = TARGET_ILL_ILLOPN;
+ break;
+
+ case EC_WFX_TRAP: /* user-only WFI implemented as NOP */
+ case EC_CP15RTTRAP: /* AArch32 */
+ case EC_CP15RRTTRAP: /* AArch32 */
+ case EC_CP14RTTRAP: /* AArch32 */
+ case EC_CP14DTTRAP: /* AArch32 */
+ case EC_ADVSIMDFPACCESSTRAP: /* user-only does not disable fpu */
+ case EC_FPIDTRAP: /* AArch32 */
+ case EC_PACTRAP: /* user-only does not disable pac regs */
+ case EC_BXJTRAP: /* AArch32 */
+ case EC_CP14RRTTRAP: /* AArch32 */
+ case EC_AA32_SVC: /* AArch32 */
+ case EC_AA32_HVC: /* AArch32 */
+ case EC_AA32_SMC: /* AArch32 */
+ case EC_AA64_SVC: /* generates EXCP_SWI */
+ case EC_AA64_HVC: /* user-only generates EC_UNCATEGORIZED */
+ case EC_AA64_SMC: /* user-only generates EC_UNCATEGORIZED */
+ case EC_SVEACCESSTRAP: /* user-only does not disable sve */
+ case EC_ERETTRAP: /* user-only generates EC_UNCATEGORIZED */
+ case EC_GPC: /* user-only has no EL3 gpc tables */
+ case EC_INSNABORT_SAME_EL: /* el0 cannot trap to el0 */
+ case EC_DATAABORT_SAME_EL: /* el0 cannot trap to el0 */
+ case EC_SPALIGNMENT: /* sp alignment checks not implemented */
+ case EC_AA32_FPTRAP: /* fp exceptions not implemented */
+ case EC_AA64_FPTRAP: /* fp exceptions not implemented */
+ case EC_SERROR: /* user-only does not have hw faults */
+ case EC_BREAKPOINT: /* user-only does not have hw debug */
+ case EC_BREAKPOINT_SAME_EL: /* user-only does not have hw debug */
+ case EC_SOFTWARESTEP: /* user-only does not have hw debug */
+ case EC_SOFTWARESTEP_SAME_EL: /* user-only does not have hw debug */
+ case EC_WATCHPOINT: /* user-only does not have hw debug */
+ case EC_WATCHPOINT_SAME_EL: /* user-only does not have hw debug */
+ case EC_AA32_BKPT: /* AArch32 */
+ case EC_VECTORCATCH: /* AArch32 */
+ case EC_AA64_BKPT: /* generates EXCP_BKPT */
+ default:
+ g_assert_not_reached();
+ }
+
+ force_sig_fault(si_signo, si_code, addr);
+}
+
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
CPUState *cs = env_cpu(env);
- int trapnr, ec, fsc, si_code, si_signo;
+ int trapnr;
abi_long ret;
for (;;) {
cpu_exec_start(cs);
trapnr = cpu_exec(cs);
cpu_exec_end(cs);
- process_queued_cpu_work(cs);
+ qemu_process_cpu_events(cs);
switch (trapnr) {
case EXCP_SWI:
@@ -63,46 +189,11 @@ void cpu_loop(CPUARMState *env)
/* just indicate that signals should be handled asap */
break;
case EXCP_UDEF:
- force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN, env->pc);
+ signal_for_exception(env, env->pc);
break;
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
- ec = syn_get_ec(env->exception.syndrome);
- switch (ec) {
- case EC_DATAABORT:
- case EC_INSNABORT:
- /* Both EC have the same format for FSC, or close enough. */
- fsc = extract32(env->exception.syndrome, 0, 6);
- switch (fsc) {
- case 0x04 ... 0x07: /* Translation fault, level {0-3} */
- si_signo = TARGET_SIGSEGV;
- si_code = TARGET_SEGV_MAPERR;
- break;
- case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */
- case 0x0d ... 0x0f: /* Permission fault, level {1-3} */
- si_signo = TARGET_SIGSEGV;
- si_code = TARGET_SEGV_ACCERR;
- break;
- case 0x11: /* Synchronous Tag Check Fault */
- si_signo = TARGET_SIGSEGV;
- si_code = TARGET_SEGV_MTESERR;
- break;
- case 0x21: /* Alignment fault */
- si_signo = TARGET_SIGBUS;
- si_code = TARGET_BUS_ADRALN;
- break;
- default:
- g_assert_not_reached();
- }
- break;
- case EC_PCALIGNMENT:
- si_signo = TARGET_SIGBUS;
- si_code = TARGET_BUS_ADRALN;
- break;
- default:
- g_assert_not_reached();
- }
- force_sig_fault(si_signo, si_code, env->exception.vaddress);
+ signal_for_exception(env, env->exception.vaddress);
break;
case EXCP_DEBUG:
case EXCP_BKPT:
@@ -137,13 +228,10 @@ void cpu_loop(CPUARMState *env)
}
}
-void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
+void init_main_thread(CPUState *cs, struct image_info *info)
{
+ CPUARMState *env = cpu_env(cs);
ARMCPU *cpu = env_archcpu(env);
- CPUState *cs = env_cpu(env);
- TaskState *ts = get_task_state(cs);
- struct image_info *info = ts->info;
- int i;
if (!(arm_feature(env, ARM_FEATURE_AARCH64))) {
fprintf(stderr,
@@ -151,14 +239,12 @@ void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
exit(EXIT_FAILURE);
}
- for (i = 0; i < 31; i++) {
- env->xregs[i] = regs->regs[i];
- }
- env->pc = regs->pc;
- env->xregs[31] = regs->sp;
+ env->pc = info->entry & ~0x3ULL;
+ env->xregs[31] = info->start_stack;
+
#if TARGET_BIG_ENDIAN
env->cp15.sctlr_el[1] |= SCTLR_E0E;
- for (i = 1; i < 4; ++i) {
+ for (int i = 1; i < 4; ++i) {
env->cp15.sctlr_el[i] |= SCTLR_EE;
}
arm_rebuild_hflags(env);
@@ -167,9 +253,4 @@ void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs)
if (cpu_isar_feature(aa64_pauth, cpu)) {
qemu_guest_getrandom_nofail(&env->keys, sizeof(env->keys));
}
-
- ts->stack_base = info->start_stack;
- ts->heap_base = info->brk;
- /* This will be filled in on the first SYS_HEAPINFO call. */
- ts->heap_limit = 0;
}
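
Both EXCP_UDEF and the abort exceptions now funnel through signal_for_exception(), which maps the syndrome's exception class and fault status code onto the signal and si_code a real kernel would deliver (SEGV_MAPERR for translation faults, SEGV_ACCERR for access-flag/permission faults, SEGV_MTESERR for synchronous tag-check faults, BUS_ADRALN for alignment faults). A minimal guest-side sketch, not part of the patch, that observes the distinction:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void handler(int sig, siginfo_t *si, void *uc)
    {
        const char *why = si->si_code == SEGV_MAPERR ? "SEGV_MAPERR (unmapped)"
                        : si->si_code == SEGV_ACCERR ? "SEGV_ACCERR (permissions)"
                        : "other si_code";
        fprintf(stderr, "SIGSEGV at %p: %s\n", si->si_addr, why);
        _exit(1);
    }

    int main(void)
    {
        struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };
        sigaction(SIGSEGV, &sa, NULL);
        *(volatile int *)8 = 0;   /* unmapped address -> SEGV_MAPERR */
        return 0;
    }
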
diff --git a/linux-user/aarch64/elfload.c b/linux-user/aarch64/elfload.c
new file mode 100644
index 0000000..3af5a37
--- /dev/null
+++ b/linux-user/aarch64/elfload.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu.h"
+#include "loader.h"
+#include "target/arm/cpu-features.h"
+#include "target_elf.h"
+#include "elf.h"
+
+
+const char *get_elf_cpu_model(uint32_t eflags)
+{
+ return "any";
+}
+
+enum {
+ ARM_HWCAP_A64_FP = 1 << 0,
+ ARM_HWCAP_A64_ASIMD = 1 << 1,
+ ARM_HWCAP_A64_EVTSTRM = 1 << 2,
+ ARM_HWCAP_A64_AES = 1 << 3,
+ ARM_HWCAP_A64_PMULL = 1 << 4,
+ ARM_HWCAP_A64_SHA1 = 1 << 5,
+ ARM_HWCAP_A64_SHA2 = 1 << 6,
+ ARM_HWCAP_A64_CRC32 = 1 << 7,
+ ARM_HWCAP_A64_ATOMICS = 1 << 8,
+ ARM_HWCAP_A64_FPHP = 1 << 9,
+ ARM_HWCAP_A64_ASIMDHP = 1 << 10,
+ ARM_HWCAP_A64_CPUID = 1 << 11,
+ ARM_HWCAP_A64_ASIMDRDM = 1 << 12,
+ ARM_HWCAP_A64_JSCVT = 1 << 13,
+ ARM_HWCAP_A64_FCMA = 1 << 14,
+ ARM_HWCAP_A64_LRCPC = 1 << 15,
+ ARM_HWCAP_A64_DCPOP = 1 << 16,
+ ARM_HWCAP_A64_SHA3 = 1 << 17,
+ ARM_HWCAP_A64_SM3 = 1 << 18,
+ ARM_HWCAP_A64_SM4 = 1 << 19,
+ ARM_HWCAP_A64_ASIMDDP = 1 << 20,
+ ARM_HWCAP_A64_SHA512 = 1 << 21,
+ ARM_HWCAP_A64_SVE = 1 << 22,
+ ARM_HWCAP_A64_ASIMDFHM = 1 << 23,
+ ARM_HWCAP_A64_DIT = 1 << 24,
+ ARM_HWCAP_A64_USCAT = 1 << 25,
+ ARM_HWCAP_A64_ILRCPC = 1 << 26,
+ ARM_HWCAP_A64_FLAGM = 1 << 27,
+ ARM_HWCAP_A64_SSBS = 1 << 28,
+ ARM_HWCAP_A64_SB = 1 << 29,
+ ARM_HWCAP_A64_PACA = 1 << 30,
+ ARM_HWCAP_A64_PACG = 1ULL << 31,
+ ARM_HWCAP_A64_GCS = 1ULL << 32,
+ ARM_HWCAP_A64_CMPBR = 1ULL << 33,
+ ARM_HWCAP_A64_FPRCVT = 1ULL << 34,
+ ARM_HWCAP_A64_F8MM8 = 1ULL << 35,
+ ARM_HWCAP_A64_F8MM4 = 1ULL << 36,
+ ARM_HWCAP_A64_SVE_F16MM = 1ULL << 37,
+ ARM_HWCAP_A64_SVE_ELTPERM = 1ULL << 38,
+ ARM_HWCAP_A64_SVE_AES2 = 1ULL << 39,
+ ARM_HWCAP_A64_SVE_BFSCALE = 1ULL << 40,
+ ARM_HWCAP_A64_SVE2P2 = 1ULL << 41,
+ ARM_HWCAP_A64_SME2P2 = 1ULL << 42,
+ ARM_HWCAP_A64_SME_SBITPERM = 1ULL << 43,
+ ARM_HWCAP_A64_SME_AES = 1ULL << 44,
+ ARM_HWCAP_A64_SME_SFEXPA = 1ULL << 45,
+ ARM_HWCAP_A64_SME_STMOP = 1ULL << 46,
+ ARM_HWCAP_A64_SME_SMOP4 = 1ULL << 47,
+
+ ARM_HWCAP2_A64_DCPODP = 1 << 0,
+ ARM_HWCAP2_A64_SVE2 = 1 << 1,
+ ARM_HWCAP2_A64_SVEAES = 1 << 2,
+ ARM_HWCAP2_A64_SVEPMULL = 1 << 3,
+ ARM_HWCAP2_A64_SVEBITPERM = 1 << 4,
+ ARM_HWCAP2_A64_SVESHA3 = 1 << 5,
+ ARM_HWCAP2_A64_SVESM4 = 1 << 6,
+ ARM_HWCAP2_A64_FLAGM2 = 1 << 7,
+ ARM_HWCAP2_A64_FRINT = 1 << 8,
+ ARM_HWCAP2_A64_SVEI8MM = 1 << 9,
+ ARM_HWCAP2_A64_SVEF32MM = 1 << 10,
+ ARM_HWCAP2_A64_SVEF64MM = 1 << 11,
+ ARM_HWCAP2_A64_SVEBF16 = 1 << 12,
+ ARM_HWCAP2_A64_I8MM = 1 << 13,
+ ARM_HWCAP2_A64_BF16 = 1 << 14,
+ ARM_HWCAP2_A64_DGH = 1 << 15,
+ ARM_HWCAP2_A64_RNG = 1 << 16,
+ ARM_HWCAP2_A64_BTI = 1 << 17,
+ ARM_HWCAP2_A64_MTE = 1 << 18,
+ ARM_HWCAP2_A64_ECV = 1 << 19,
+ ARM_HWCAP2_A64_AFP = 1 << 20,
+ ARM_HWCAP2_A64_RPRES = 1 << 21,
+ ARM_HWCAP2_A64_MTE3 = 1 << 22,
+ ARM_HWCAP2_A64_SME = 1 << 23,
+ ARM_HWCAP2_A64_SME_I16I64 = 1 << 24,
+ ARM_HWCAP2_A64_SME_F64F64 = 1 << 25,
+ ARM_HWCAP2_A64_SME_I8I32 = 1 << 26,
+ ARM_HWCAP2_A64_SME_F16F32 = 1 << 27,
+ ARM_HWCAP2_A64_SME_B16F32 = 1 << 28,
+ ARM_HWCAP2_A64_SME_F32F32 = 1 << 29,
+ ARM_HWCAP2_A64_SME_FA64 = 1 << 30,
+ ARM_HWCAP2_A64_WFXT = 1ULL << 31,
+ ARM_HWCAP2_A64_EBF16 = 1ULL << 32,
+ ARM_HWCAP2_A64_SVE_EBF16 = 1ULL << 33,
+ ARM_HWCAP2_A64_CSSC = 1ULL << 34,
+ ARM_HWCAP2_A64_RPRFM = 1ULL << 35,
+ ARM_HWCAP2_A64_SVE2P1 = 1ULL << 36,
+ ARM_HWCAP2_A64_SME2 = 1ULL << 37,
+ ARM_HWCAP2_A64_SME2P1 = 1ULL << 38,
+ ARM_HWCAP2_A64_SME_I16I32 = 1ULL << 39,
+ ARM_HWCAP2_A64_SME_BI32I32 = 1ULL << 40,
+ ARM_HWCAP2_A64_SME_B16B16 = 1ULL << 41,
+ ARM_HWCAP2_A64_SME_F16F16 = 1ULL << 42,
+ ARM_HWCAP2_A64_MOPS = 1ULL << 43,
+ ARM_HWCAP2_A64_HBC = 1ULL << 44,
+ ARM_HWCAP2_A64_SVE_B16B16 = 1ULL << 45,
+ ARM_HWCAP2_A64_LRCPC3 = 1ULL << 46,
+ ARM_HWCAP2_A64_LSE128 = 1ULL << 47,
+ ARM_HWCAP2_A64_FPMR = 1ULL << 48,
+ ARM_HWCAP2_A64_LUT = 1ULL << 49,
+ ARM_HWCAP2_A64_FAMINMAX = 1ULL << 50,
+ ARM_HWCAP2_A64_F8CVT = 1ULL << 51,
+ ARM_HWCAP2_A64_F8FMA = 1ULL << 52,
+ ARM_HWCAP2_A64_F8DP4 = 1ULL << 53,
+ ARM_HWCAP2_A64_F8DP2 = 1ULL << 54,
+ ARM_HWCAP2_A64_F8E4M3 = 1ULL << 55,
+ ARM_HWCAP2_A64_F8E5M2 = 1ULL << 56,
+ ARM_HWCAP2_A64_SME_LUTV2 = 1ULL << 57,
+ ARM_HWCAP2_A64_SME_F8F16 = 1ULL << 58,
+ ARM_HWCAP2_A64_SME_F8F32 = 1ULL << 59,
+ ARM_HWCAP2_A64_SME_SF8FMA = 1ULL << 60,
+ ARM_HWCAP2_A64_SME_SF8DP4 = 1ULL << 61,
+ ARM_HWCAP2_A64_SME_SF8DP2 = 1ULL << 62,
+ ARM_HWCAP2_A64_POE = 1ULL << 63,
+};
+
+#define GET_FEATURE_ID(feat, hwcap) \
+ do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
+abi_ulong get_elf_hwcap(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ abi_ulong hwcaps = 0;
+
+ hwcaps |= ARM_HWCAP_A64_FP;
+ hwcaps |= ARM_HWCAP_A64_ASIMD;
+ hwcaps |= ARM_HWCAP_A64_CPUID;
+
+ /* probe for the extra features */
+
+ GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
+ GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
+ GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
+ GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
+ GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
+ GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
+ GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
+ GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
+ GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
+ GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
+ GET_FEATURE_ID(aa64_lse, ARM_HWCAP_A64_ATOMICS);
+ GET_FEATURE_ID(aa64_lse2, ARM_HWCAP_A64_USCAT);
+ GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
+ GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
+ GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
+ GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
+ GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
+ GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
+ GET_FEATURE_ID(aa64_dit, ARM_HWCAP_A64_DIT);
+ GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
+ GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
+ GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
+ GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
+ GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC);
+ GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC);
+ GET_FEATURE_ID(aa64_gcs, ARM_HWCAP_A64_GCS);
+
+ return hwcaps;
+}
+
+abi_ulong get_elf_hwcap2(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ abi_ulong hwcaps = 0;
+
+ GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
+ GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2);
+ GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES);
+ GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL);
+ GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM);
+ GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3);
+ GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4);
+ GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
+ GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
+ GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM);
+ GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM);
+ GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM);
+ GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16);
+ GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM);
+ GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16);
+ GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
+ GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
+ GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
+ GET_FEATURE_ID(aa64_mte3, ARM_HWCAP2_A64_MTE3);
+ GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
+ ARM_HWCAP2_A64_SME_F32F32 |
+ ARM_HWCAP2_A64_SME_B16F32 |
+ ARM_HWCAP2_A64_SME_F16F32 |
+ ARM_HWCAP2_A64_SME_I8I32));
+ GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
+ GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
+ GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);
+ GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC);
+ GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS);
+ GET_FEATURE_ID(aa64_sve2p1, ARM_HWCAP2_A64_SVE2P1);
+ GET_FEATURE_ID(aa64_sme2, (ARM_HWCAP2_A64_SME2 |
+ ARM_HWCAP2_A64_SME_I16I32 |
+ ARM_HWCAP2_A64_SME_BI32I32));
+ GET_FEATURE_ID(aa64_sme2p1, ARM_HWCAP2_A64_SME2P1);
+ GET_FEATURE_ID(aa64_sme_b16b16, ARM_HWCAP2_A64_SME_B16B16);
+ GET_FEATURE_ID(aa64_sme_f16f16, ARM_HWCAP2_A64_SME_F16F16);
+ GET_FEATURE_ID(aa64_sve_b16b16, ARM_HWCAP2_A64_SVE_B16B16);
+ GET_FEATURE_ID(aa64_cssc, ARM_HWCAP2_A64_CSSC);
+ GET_FEATURE_ID(aa64_lse128, ARM_HWCAP2_A64_LSE128);
+
+ return hwcaps;
+}
+
+const char *elf_hwcap_str(uint32_t bit)
+{
+ static const char * const hwcap_str[] = {
+ [__builtin_ctz(ARM_HWCAP_A64_FP )] = "fp",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMD )] = "asimd",
+ [__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm",
+ [__builtin_ctz(ARM_HWCAP_A64_AES )] = "aes",
+ [__builtin_ctz(ARM_HWCAP_A64_PMULL )] = "pmull",
+ [__builtin_ctz(ARM_HWCAP_A64_SHA1 )] = "sha1",
+ [__builtin_ctz(ARM_HWCAP_A64_SHA2 )] = "sha2",
+ [__builtin_ctz(ARM_HWCAP_A64_CRC32 )] = "crc32",
+ [__builtin_ctz(ARM_HWCAP_A64_ATOMICS )] = "atomics",
+ [__builtin_ctz(ARM_HWCAP_A64_FPHP )] = "fphp",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMDHP )] = "asimdhp",
+ [__builtin_ctz(ARM_HWCAP_A64_CPUID )] = "cpuid",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMDRDM)] = "asimdrdm",
+ [__builtin_ctz(ARM_HWCAP_A64_JSCVT )] = "jscvt",
+ [__builtin_ctz(ARM_HWCAP_A64_FCMA )] = "fcma",
+ [__builtin_ctz(ARM_HWCAP_A64_LRCPC )] = "lrcpc",
+ [__builtin_ctz(ARM_HWCAP_A64_DCPOP )] = "dcpop",
+ [__builtin_ctz(ARM_HWCAP_A64_SHA3 )] = "sha3",
+ [__builtin_ctz(ARM_HWCAP_A64_SM3 )] = "sm3",
+ [__builtin_ctz(ARM_HWCAP_A64_SM4 )] = "sm4",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMDDP )] = "asimddp",
+ [__builtin_ctz(ARM_HWCAP_A64_SHA512 )] = "sha512",
+ [__builtin_ctz(ARM_HWCAP_A64_SVE )] = "sve",
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMDFHM)] = "asimdfhm",
+ [__builtin_ctz(ARM_HWCAP_A64_DIT )] = "dit",
+ [__builtin_ctz(ARM_HWCAP_A64_USCAT )] = "uscat",
+ [__builtin_ctz(ARM_HWCAP_A64_ILRCPC )] = "ilrcpc",
+ [__builtin_ctz(ARM_HWCAP_A64_FLAGM )] = "flagm",
+ [__builtin_ctz(ARM_HWCAP_A64_SSBS )] = "ssbs",
+ [__builtin_ctz(ARM_HWCAP_A64_SB )] = "sb",
+ [__builtin_ctz(ARM_HWCAP_A64_PACA )] = "paca",
+ [__builtin_ctz(ARM_HWCAP_A64_PACG )] = "pacg",
+ [__builtin_ctzll(ARM_HWCAP_A64_GCS )] = "gcs",
+ [__builtin_ctzll(ARM_HWCAP_A64_CMPBR )] = "cmpbr",
+ [__builtin_ctzll(ARM_HWCAP_A64_FPRCVT)] = "fprcvt",
+ [__builtin_ctzll(ARM_HWCAP_A64_F8MM8 )] = "f8mm8",
+ [__builtin_ctzll(ARM_HWCAP_A64_F8MM4 )] = "f8mm4",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE_F16MM)] = "svef16mm",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE_ELTPERM)] = "sveeltperm",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE_AES2)] = "sveaes2",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE_BFSCALE)] = "svebfscale",
+ [__builtin_ctzll(ARM_HWCAP_A64_SVE2P2)] = "sve2p2",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME2P2)] = "sme2p2",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_SBITPERM)] = "smesbitperm",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_AES)] = "smeaes",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_SFEXPA)] = "smesfexpa",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_STMOP)] = "smestmop",
+ [__builtin_ctzll(ARM_HWCAP_A64_SME_SMOP4)] = "smesmop4",
+ };
+
+ return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
+}
+
+const char *elf_hwcap2_str(uint32_t bit)
+{
+ static const char * const hwcap_str[] = {
+ [__builtin_ctz(ARM_HWCAP2_A64_DCPODP )] = "dcpodp",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVE2 )] = "sve2",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEAES )] = "sveaes",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEPMULL )] = "svepmull",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEBITPERM )] = "svebitperm",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVESHA3 )] = "svesha3",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVESM4 )] = "svesm4",
+ [__builtin_ctz(ARM_HWCAP2_A64_FLAGM2 )] = "flagm2",
+ [__builtin_ctz(ARM_HWCAP2_A64_FRINT )] = "frint",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEI8MM )] = "svei8mm",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEF32MM )] = "svef32mm",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEF64MM )] = "svef64mm",
+ [__builtin_ctz(ARM_HWCAP2_A64_SVEBF16 )] = "svebf16",
+ [__builtin_ctz(ARM_HWCAP2_A64_I8MM )] = "i8mm",
+ [__builtin_ctz(ARM_HWCAP2_A64_BF16 )] = "bf16",
+ [__builtin_ctz(ARM_HWCAP2_A64_DGH )] = "dgh",
+ [__builtin_ctz(ARM_HWCAP2_A64_RNG )] = "rng",
+ [__builtin_ctz(ARM_HWCAP2_A64_BTI )] = "bti",
+ [__builtin_ctz(ARM_HWCAP2_A64_MTE )] = "mte",
+ [__builtin_ctz(ARM_HWCAP2_A64_ECV )] = "ecv",
+ [__builtin_ctz(ARM_HWCAP2_A64_AFP )] = "afp",
+ [__builtin_ctz(ARM_HWCAP2_A64_RPRES )] = "rpres",
+ [__builtin_ctz(ARM_HWCAP2_A64_MTE3 )] = "mte3",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME )] = "sme",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64 )] = "smei16i64",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64 )] = "smef64f64",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32 )] = "smei8i32",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32 )] = "smef16f32",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32 )] = "smeb16f32",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32 )] = "smef32f32",
+ [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64 )] = "smefa64",
+ [__builtin_ctz(ARM_HWCAP2_A64_WFXT )] = "wfxt",
+ [__builtin_ctzll(ARM_HWCAP2_A64_EBF16 )] = "ebf16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SVE_EBF16 )] = "sveebf16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_CSSC )] = "cssc",
+ [__builtin_ctzll(ARM_HWCAP2_A64_RPRFM )] = "rprfm",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SVE2P1 )] = "sve2p1",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME2 )] = "sme2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME2P1 )] = "sme2p1",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_I16I32 )] = "smei16i32",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_BI32I32)] = "smebi32i32",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_B16B16 )] = "smeb16b16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_MOPS )] = "mops",
+ [__builtin_ctzll(ARM_HWCAP2_A64_HBC )] = "hbc",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SVE_B16B16 )] = "sveb16b16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_LRCPC3 )] = "lrcpc3",
+ [__builtin_ctzll(ARM_HWCAP2_A64_LSE128 )] = "lse128",
+ [__builtin_ctzll(ARM_HWCAP2_A64_FPMR )] = "fpmr",
+ [__builtin_ctzll(ARM_HWCAP2_A64_LUT )] = "lut",
+ [__builtin_ctzll(ARM_HWCAP2_A64_FAMINMAX )] = "faminmax",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8CVT )] = "f8cvt",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8FMA )] = "f8fma",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8DP4 )] = "f8dp4",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8DP2 )] = "f8dp2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8E4M3 )] = "f8e4m3",
+ [__builtin_ctzll(ARM_HWCAP2_A64_F8E5M2 )] = "f8e5m2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_LUTV2 )] = "smelutv2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_F8F16 )] = "smef8f16",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_F8F32 )] = "smef8f32",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_SF8DP4 )] = "smesf8dp4",
+ [__builtin_ctzll(ARM_HWCAP2_A64_SME_SF8DP2 )] = "smesf8dp2",
+ [__builtin_ctzll(ARM_HWCAP2_A64_POE )] = "poe",
+ };
+
+ return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
+}
+
+const char *get_elf_platform(CPUState *cs)
+{
+ return TARGET_BIG_ENDIAN ? "aarch64_be" : "aarch64";
+}
+
+bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz,
+ const uint32_t *data,
+ struct image_info *info,
+ Error **errp)
+{
+ if (pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
+ if (pr_datasz != sizeof(uint32_t)) {
+ error_setg(errp, "Ill-formed GNU_PROPERTY_AARCH64_FEATURE_1_AND");
+ return false;
+ }
+ /* We will extract GNU_PROPERTY_AARCH64_FEATURE_1_BTI later. */
+ info->note_flags = *data;
+ }
+ return true;
+}
+
+void elf_core_copy_regs(target_elf_gregset_t *r, const CPUARMState *env)
+{
+ for (int i = 0; i < 31; i++) {
+ r->pt.regs[i] = tswap64(env->xregs[i]);
+ }
+ r->pt.sp = tswap64(env->xregs[31]);
+ r->pt.pc = tswap64(env->pc);
+ r->pt.pstate = tswap64(pstate_read((CPUARMState *)env));
+}
diff --git a/linux-user/aarch64/gcs-internal.h b/linux-user/aarch64/gcs-internal.h
new file mode 100644
index 0000000..e586c7e
--- /dev/null
+++ b/linux-user/aarch64/gcs-internal.h
@@ -0,0 +1,38 @@
+/*
+ * AArch64 gcs functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef AARCH64_GCS_INTERNAL_H
+#define AARCH64_GCS_INTERNAL_H
+
+#ifndef PR_SHADOW_STACK_ENABLE
+# define PR_SHADOW_STACK_ENABLE (1U << 0)
+# define PR_SHADOW_STACK_WRITE (1U << 1)
+# define PR_SHADOW_STACK_PUSH (1U << 2)
+#endif
+
+static inline uint64_t gcs_get_el0_mode(CPUArchState *env)
+{
+ uint64_t cr = env->cp15.gcscr_el[0];
+ abi_ulong flags = 0;
+
+ flags |= cr & GCSCR_PCRSEL ? PR_SHADOW_STACK_ENABLE : 0;
+ flags |= cr & GCSCR_STREN ? PR_SHADOW_STACK_WRITE : 0;
+ flags |= cr & GCSCR_PUSHMEN ? PR_SHADOW_STACK_PUSH : 0;
+
+ return flags;
+}
+
+static inline void gcs_set_el0_mode(CPUArchState *env, uint64_t flags)
+{
+ uint64_t cr = GCSCRE0_NTR;
+
+ cr |= flags & PR_SHADOW_STACK_ENABLE ? GCSCR_RVCHKEN | GCSCR_PCRSEL : 0;
+ cr |= flags & PR_SHADOW_STACK_WRITE ? GCSCR_STREN : 0;
+ cr |= flags & PR_SHADOW_STACK_PUSH ? GCSCR_PUSHMEN : 0;
+
+ env->cp15.gcscr_el[0] = cr;
+}
+
+#endif
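
gcs_get_el0_mode() and gcs_set_el0_mode() translate between the GCSCR_EL0 control bits and the PR_SHADOW_STACK_* flags of Linux's generic shadow-stack prctl interface. For orientation, a guest would drive that interface roughly as below; this is a sketch, and the request numbers are taken from recent Linux uapi headers (arm64 GCS landed around v6.13), so treat the fallback values as assumptions if your headers predate them:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_GET_SHADOW_STACK_STATUS    /* assumed values, recent uapi */
    #define PR_GET_SHADOW_STACK_STATUS  74
    #define PR_SET_SHADOW_STACK_STATUS  75
    #define PR_SHADOW_STACK_ENABLE      (1UL << 0)
    #endif

    int main(void)
    {
        unsigned long status = 0;

        /* Enable GCS for this thread; the stack itself is allocated on demand. */
        if (prctl(PR_SET_SHADOW_STACK_STATUS, PR_SHADOW_STACK_ENABLE, 0, 0, 0)) {
            perror("PR_SET_SHADOW_STACK_STATUS");
            return 1;
        }
        prctl(PR_GET_SHADOW_STACK_STATUS, &status, 0, 0, 0);
        printf("shadow stack status: %#lx\n", status);
        return 0;
    }
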
diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index bc7a138..f7edfa2 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -22,6 +22,7 @@
#include "signal-common.h"
#include "linux-user/trace.h"
#include "target/arm/cpu-features.h"
+#include "gcs-internal.h"
struct target_sigcontext {
uint64_t fault_address;
@@ -65,6 +66,13 @@ struct target_fpsimd_context {
uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
};
+#define TARGET_ESR_MAGIC 0x45535201
+
+struct target_esr_context {
+ struct target_aarch64_ctx head;
+ uint64_t esr;
+};
+
#define TARGET_EXTRA_MAGIC 0x45585401
struct target_extra_context {
@@ -121,6 +129,40 @@ struct target_za_context {
#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
+#define TARGET_TPIDR2_MAGIC 0x54504902
+
+struct target_tpidr2_context {
+ struct target_aarch64_ctx head;
+ uint64_t tpidr2;
+};
+
+#define TARGET_ZT_MAGIC 0x5a544e01
+
+struct target_zt_context {
+ struct target_aarch64_ctx head;
+ uint16_t nregs;
+ uint16_t reserved[3];
+ /* ZTn register data immediately follows */
+};
+
+#define TARGET_ZT_SIG_REG_BYTES (512 / 8)
+#define TARGET_ZT_SIG_REGS_SIZE(n) (TARGET_ZT_SIG_REG_BYTES * (n))
+#define TARGET_ZT_SIG_CONTEXT_SIZE(n) (sizeof(struct target_zt_context) + \
+ TARGET_ZT_SIG_REGS_SIZE(n))
+#define TARGET_ZT_SIG_REGS_OFFSET sizeof(struct target_zt_context)
+QEMU_BUILD_BUG_ON(TARGET_ZT_SIG_REG_BYTES != \
+ sizeof_field(CPUARMState, za_state.zt0));
+
+#define TARGET_GCS_MAGIC 0x47435300
+#define GCS_SIGNAL_CAP(X) ((X) & TARGET_PAGE_MASK)
+
+struct target_gcs_context {
+ struct target_aarch64_ctx head;
+ uint64_t gcspr;
+ uint64_t features_enabled;
+ uint64_t reserved;
+};
+
struct target_rt_sigframe {
struct target_siginfo info;
struct target_ucontext uc;
@@ -177,6 +219,14 @@ static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd,
}
}
+static void target_setup_esr_record(struct target_esr_context *ctx,
+ CPUARMState *env)
+{
+ __put_user(TARGET_ESR_MAGIC, &ctx->head.magic);
+ __put_user(sizeof(*ctx), &ctx->head.size);
+ __put_user(env->cp15.esr_el[1], &ctx->esr);
+}
+
static void target_setup_extra_record(struct target_extra_context *extra,
uint64_t datap, uint32_t extra_size)
{
@@ -248,9 +298,68 @@ static void target_setup_za_record(struct target_za_context *za,
for (i = 0; i < vl; ++i) {
uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
for (j = 0; j < vq * 2; ++j) {
- __put_user_e(env->zarray[i].d[j], z + j, le);
+ __put_user_e(env->za_state.za[i].d[j], z + j, le);
+ }
+ }
+}
+
+static void target_setup_tpidr2_record(struct target_tpidr2_context *tpidr2,
+ CPUARMState *env)
+{
+ __put_user(TARGET_TPIDR2_MAGIC, &tpidr2->head.magic);
+ __put_user(sizeof(struct target_tpidr2_context), &tpidr2->head.size);
+ __put_user(env->cp15.tpidr2_el0, &tpidr2->tpidr2);
+}
+
+static void target_setup_zt_record(struct target_zt_context *zt,
+ CPUARMState *env, int size)
+{
+ uint64_t *z;
+
+ memset(zt, 0, sizeof(*zt));
+ __put_user(TARGET_ZT_MAGIC, &zt->head.magic);
+ __put_user(size, &zt->head.size);
+ /*
+ * The record format allows for multiple ZT regs, but
+ * currently there is only one, ZT0.
+ */
+ __put_user(1, &zt->nregs);
+ assert(size == TARGET_ZT_SIG_CONTEXT_SIZE(1));
+
+ /* ZT0 is the same byte-stream format as SVE regs and ZA */
+ z = (void *)zt + TARGET_ZT_SIG_REGS_OFFSET;
+ for (int i = 0; i < ARRAY_SIZE(env->za_state.zt0); i++) {
+ __put_user_e(env->za_state.zt0[i], z + i, le);
+ }
+}
+
+static bool target_setup_gcs_record(struct target_gcs_context *ctx,
+ CPUARMState *env, uint64_t return_addr)
+{
+ uint64_t mode = gcs_get_el0_mode(env);
+ uint64_t gcspr = env->cp15.gcspr_el[0];
+
+ if (mode & PR_SHADOW_STACK_ENABLE) {
+ /* Push a cap for the signal frame. */
+ gcspr -= 8;
+ if (put_user_u64(GCS_SIGNAL_CAP(gcspr), gcspr)) {
+ return false;
+ }
+
+ /* Push a gcs entry for the trampoline. */
+ if (put_user_u64(return_addr, gcspr - 8)) {
+ return false;
}
+ env->cp15.gcspr_el[0] = gcspr - 8;
}
+
+ __put_user(TARGET_GCS_MAGIC, &ctx->head.magic);
+ __put_user(sizeof(*ctx), &ctx->head.size);
+ __put_user(gcspr, &ctx->gcspr);
+ __put_user(mode, &ctx->features_enabled);
+ __put_user(0, &ctx->reserved);
+
+ return true;
}
static void target_restore_general_frame(CPUARMState *env,
@@ -397,12 +506,100 @@ static bool target_restore_za_record(CPUARMState *env,
for (i = 0; i < vl; ++i) {
uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
for (j = 0; j < vq * 2; ++j) {
- __get_user_e(env->zarray[i].d[j], z + j, le);
+ __get_user_e(env->za_state.za[i].d[j], z + j, le);
}
}
return true;
}
+static void target_restore_tpidr2_record(CPUARMState *env,
+ struct target_tpidr2_context *tpidr2)
+{
+ __get_user(env->cp15.tpidr2_el0, &tpidr2->tpidr2);
+}
+
+static bool target_restore_zt_record(CPUARMState *env,
+ struct target_zt_context *zt, int size,
+ int svcr)
+{
+ uint16_t nregs;
+ uint64_t *z;
+
+ if (!(FIELD_EX64(svcr, SVCR, ZA))) {
+ return false;
+ }
+
+ __get_user(nregs, &zt->nregs);
+
+ if (nregs != 1) {
+ return false;
+ }
+
+ z = (void *)zt + TARGET_ZT_SIG_REGS_OFFSET;
+ for (int i = 0; i < ARRAY_SIZE(env->za_state.zt0); i++) {
+ __get_user_e(env->za_state.zt0[i], z + i, le);
+ }
+ return true;
+}
+
+static bool target_restore_gcs_record(CPUARMState *env,
+ struct target_gcs_context *ctx,
+ bool *rebuild_hflags)
+{
+ TaskState *ts = get_task_state(env_cpu(env));
+ uint64_t cur_mode = gcs_get_el0_mode(env);
+ uint64_t new_mode, gcspr;
+
+ __get_user(new_mode, &ctx->features_enabled);
+ __get_user(gcspr, &ctx->gcspr);
+
+ /*
+ * The kernel pushes the value through the hw register:
+ * write_sysreg_s(gcspr, SYS_GCSPR_EL0) in restore_gcs_context,
+ * then read_sysreg_s(SYS_GCSPR_EL0) in gcs_restore_signal.
+ * Since the bottom 3 bits are RES0, this can (CONSTRAINED UNPREDICTABLE)
+ * force align the value. Mirror the choice from gcspr_write().
+ */
+ gcspr &= ~7;
+
+ if (new_mode & ~(PR_SHADOW_STACK_ENABLE |
+ PR_SHADOW_STACK_WRITE |
+ PR_SHADOW_STACK_PUSH)) {
+ return false;
+ }
+ if ((new_mode ^ cur_mode) & ts->gcs_el0_locked) {
+ return false;
+ }
+ if (new_mode & ~cur_mode & PR_SHADOW_STACK_ENABLE) {
+ return false;
+ }
+
+ if (new_mode & PR_SHADOW_STACK_ENABLE) {
+ uint64_t cap;
+
+ /* Pop and clear the signal cap. */
+ if (get_user_u64(cap, gcspr)) {
+ return false;
+ }
+ if (cap != GCS_SIGNAL_CAP(gcspr)) {
+ return false;
+ }
+ if (put_user_u64(0, gcspr)) {
+ return false;
+ }
+ gcspr += 8;
+ } else {
+ new_mode = 0;
+ }
+
+ env->cp15.gcspr_el[0] = gcspr;
+ if (new_mode != cur_mode) {
+ *rebuild_hflags = true;
+ gcs_set_el0_mode(env, new_mode);
+ }
+ return true;
+}
+
static int target_restore_sigframe(CPUARMState *env,
struct target_rt_sigframe *sf)
{
@@ -410,10 +607,15 @@ static int target_restore_sigframe(CPUARMState *env,
struct target_fpsimd_context *fpsimd = NULL;
struct target_sve_context *sve = NULL;
struct target_za_context *za = NULL;
+ struct target_tpidr2_context *tpidr2 = NULL;
+ struct target_zt_context *zt = NULL;
+ struct target_gcs_context *gcs = NULL;
uint64_t extra_datap = 0;
bool used_extra = false;
+ bool rebuild_hflags = false;
int sve_size = 0;
int za_size = 0;
+ int zt_size = 0;
int svcr = 0;
target_restore_general_frame(env, sf);
@@ -444,6 +646,9 @@ static int target_restore_sigframe(CPUARMState *env,
fpsimd = (struct target_fpsimd_context *)ctx;
break;
+ case TARGET_ESR_MAGIC:
+ break; /* ignore */
+
case TARGET_SVE_MAGIC:
if (sve || size < sizeof(struct target_sve_context)) {
goto err;
@@ -460,6 +665,32 @@ static int target_restore_sigframe(CPUARMState *env,
za_size = size;
break;
+ case TARGET_TPIDR2_MAGIC:
+ if (tpidr2 || size != sizeof(struct target_tpidr2_context) ||
+ !cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ goto err;
+ }
+ tpidr2 = (struct target_tpidr2_context *)ctx;
+ break;
+
+ case TARGET_ZT_MAGIC:
+ if (zt || size != TARGET_ZT_SIG_CONTEXT_SIZE(1) ||
+ !cpu_isar_feature(aa64_sme2, env_archcpu(env))) {
+ goto err;
+ }
+ zt = (struct target_zt_context *)ctx;
+ zt_size = size;
+ break;
+
+ case TARGET_GCS_MAGIC:
+ if (gcs
+ || size != sizeof(struct target_gcs_context)
+ || !cpu_isar_feature(aa64_gcs, env_archcpu(env))) {
+ goto err;
+ }
+ gcs = (struct target_gcs_context *)ctx;
+ break;
+
case TARGET_EXTRA_MAGIC:
if (extra || size != sizeof(struct target_extra_context)) {
goto err;
@@ -490,6 +721,10 @@ static int target_restore_sigframe(CPUARMState *env,
goto err;
}
+ if (gcs && !target_restore_gcs_record(env, gcs, &rebuild_hflags)) {
+ goto err;
+ }
+
/* SVE data, if present, overwrites FPSIMD data. */
if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
goto err;
@@ -497,8 +732,21 @@ static int target_restore_sigframe(CPUARMState *env,
if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
goto err;
}
+ if (tpidr2) {
+ target_restore_tpidr2_record(env, tpidr2);
+ }
+ /*
+ * NB that we must restore ZT after ZA so the check that there's
+ * no ZT record if SVCR.ZA is 0 gets the right value of SVCR.
+ */
+ if (zt && !target_restore_zt_record(env, zt, zt_size, svcr)) {
+ goto err;
+ }
if (env->svcr != svcr) {
env->svcr = svcr;
+ rebuild_hflags = true;
+ }
+ if (rebuild_hflags) {
arm_rebuild_hflags(env);
}
unlock_user(extra, extra_datap, 0);
@@ -568,8 +816,9 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
.total_size = offsetof(struct target_rt_sigframe,
uc.tuc_mcontext.__reserved),
};
- int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0;
- int sve_size = 0, za_size = 0;
+ int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0, tpidr2_ofs = 0;
+ int zt_ofs = 0, esr_ofs = 0, gcs_ofs = 0;
+ int sve_size = 0, za_size = 0, tpidr2_size = 0, zt_size = 0;
struct target_rt_sigframe *frame;
struct target_rt_frame_record *fr;
abi_ulong frame_addr, return_addr;
@@ -578,6 +827,20 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
fpsimd_ofs = alloc_sigframe_space(sizeof(struct target_fpsimd_context),
&layout);
+ /*
+ * In user mode, ESR_EL1 is only set by cpu_loop while queueing the
+ * signal, and it's only valid for the one sync insn.
+ */
+ if (env->cp15.esr_el[1]) {
+ esr_ofs = alloc_sigframe_space(sizeof(struct target_esr_context),
+ &layout);
+ }
+
+ if (env->cp15.gcspr_el[0]) {
+ gcs_ofs = alloc_sigframe_space(sizeof(struct target_gcs_context),
+ &layout);
+ }
+
/* SVE state needs saving only if it exists. */
if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
cpu_isar_feature(aa64_sme, env_archcpu(env))) {
@@ -585,6 +848,8 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
sve_ofs = alloc_sigframe_space(sve_size, &layout);
}
if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+ tpidr2_size = sizeof(struct target_tpidr2_context);
+ tpidr2_ofs = alloc_sigframe_space(tpidr2_size, &layout);
/* ZA state needs saving only if it is enabled. */
if (FIELD_EX64(env->svcr, SVCR, ZA)) {
za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env));
@@ -593,6 +858,12 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
}
za_ofs = alloc_sigframe_space(za_size, &layout);
}
+ if (cpu_isar_feature(aa64_sme2, env_archcpu(env)) &&
+ FIELD_EX64(env->svcr, SVCR, ZA)) {
+ /* If SME ZA storage is enabled, we must also save SME2 ZT0 */
+ zt_size = TARGET_ZT_SIG_CONTEXT_SIZE(1);
+ zt_ofs = alloc_sigframe_space(zt_size, &layout);
+ }
if (layout.extra_ofs) {
/* Reserve space for the extra end marker. The standard end marker
@@ -629,8 +900,23 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
goto give_sigsegv;
}
+ if (ka->sa_flags & TARGET_SA_RESTORER) {
+ return_addr = ka->sa_restorer;
+ } else {
+ return_addr = default_rt_sigreturn;
+ }
+
target_setup_general_frame(frame, env, set);
target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env);
+ if (esr_ofs) {
+ target_setup_esr_record((void *)frame + esr_ofs, env);
+ /* Leave ESR_EL1 clear while it's not relevant. */
+ env->cp15.esr_el[1] = 0;
+ }
+ if (gcs_ofs &&
+ !target_setup_gcs_record((void *)frame + gcs_ofs, env, return_addr)) {
+ goto give_sigsegv;
+ }
target_setup_end_record((void *)frame + layout.std_end_ofs);
if (layout.extra_ofs) {
target_setup_extra_record((void *)frame + layout.extra_ofs,
@@ -644,17 +930,18 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
if (za_ofs) {
target_setup_za_record((void *)frame + za_ofs, env, za_size);
}
+ if (tpidr2_ofs) {
+ target_setup_tpidr2_record((void *)frame + tpidr2_ofs, env);
+ }
+ if (zt_ofs) {
+ target_setup_zt_record((void *)frame + zt_ofs, env, zt_size);
+ }
/* Set up the stack frame for unwinding. */
fr = (void *)frame + fr_ofs;
__put_user(env->xregs[29], &fr->fp);
__put_user(env->xregs[30], &fr->lr);
- if (ka->sa_flags & TARGET_SA_RESTORER) {
- return_addr = ka->sa_restorer;
- } else {
- return_addr = default_rt_sigreturn;
- }
env->xregs[0] = usig;
env->xregs[29] = frame_addr + fr_ofs;
env->xregs[30] = return_addr;
@@ -666,8 +953,12 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
env->btype = 2;
}
- /* Invoke the signal handler with both SM and ZA disabled. */
+ /*
+ * Invoke the signal handler with a clean SME state: both SM and ZA
+ * disabled and TPIDR2_EL0 cleared.
+ */
aarch64_set_svcr(env, 0, R_SVCR_SM_MASK | R_SVCR_ZA_MASK);
+ env->cp15.tpidr2_el0 = 0;
if (info) {
frame->info = *info;
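
All of the new records (ESR, TPIDR2, ZT, GCS) follow the standard AArch64 extended sigcontext layout: a chain of { magic, size } headers inside uc_mcontext.__reserved, terminated by a record with magic 0. A guest-side sketch of walking that chain from a SA_SIGINFO handler (the EXTRA_MAGIC indirection is ignored for brevity, and GCS_MAGIC is defined locally on the assumption that older <asm/sigcontext.h> headers lack it):

    #include <signal.h>
    #include <stdio.h>
    #include <ucontext.h>
    #include <asm/sigcontext.h>       /* struct _aarch64_ctx, FPSIMD_MAGIC, ... */

    #ifndef GCS_MAGIC
    #define GCS_MAGIC 0x47435300      /* matches TARGET_GCS_MAGIC above */
    #endif

    static void handler(int sig, siginfo_t *si, void *ucv)
    {
        ucontext_t *uc = ucv;
        struct _aarch64_ctx *ctx =
            (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

        /* The chain ends at a record whose magic is 0. */
        while (ctx->magic != 0) {
            printf("record: magic %#x, size %u%s\n", ctx->magic, ctx->size,
                   ctx->magic == GCS_MAGIC ? " (GCS)" : "");
            ctx = (struct _aarch64_ctx *)((char *)ctx + ctx->size);
        }
    }

    int main(void)
    {
        struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };
        sigaction(SIGUSR1, &sa, NULL);
        raise(SIGUSR1);
        return 0;
    }
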
diff --git a/linux-user/aarch64/target_elf.h b/linux-user/aarch64/target_elf.h
index a7eb962..4cdeb64 100644
--- a/linux-user/aarch64/target_elf.h
+++ b/linux-user/aarch64/target_elf.h
@@ -7,8 +7,30 @@
#ifndef AARCH64_TARGET_ELF_H
#define AARCH64_TARGET_ELF_H
-static inline const char *cpu_get_model(uint32_t eflags)
-{
- return "any";
-}
+
+#include "target_ptrace.h"
+
+#define ELF_MACHINE EM_AARCH64
+#define ELF_CLASS ELFCLASS64
+
+#define HAVE_ELF_HWCAP 1
+#define HAVE_ELF_HWCAP2 1
+#define HAVE_ELF_PLATFORM 1
+#define HAVE_ELF_CORE_DUMP 1
+#define HAVE_ELF_GNU_PROPERTY 1
+
+/*
+ * See linux kernel: arch/arm64/include/asm/elf.h, where
+ * elf_gregset_t is mapped to struct user_pt_regs via sizeof.
+ */
+typedef struct target_elf_gregset_t {
+ struct target_user_pt_regs pt;
+} target_elf_gregset_t;
+
+#if TARGET_BIG_ENDIAN
+# define VDSO_HEADER "vdso-be.c.inc"
+#else
+# define VDSO_HEADER "vdso-le.c.inc"
+#endif
+
#endif
diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h
index ed75b9e..621be57 100644
--- a/linux-user/aarch64/target_prctl.h
+++ b/linux-user/aarch64/target_prctl.h
@@ -6,8 +6,10 @@
#ifndef AARCH64_TARGET_PRCTL_H
#define AARCH64_TARGET_PRCTL_H
+#include "qemu/units.h"
#include "target/arm/cpu-features.h"
#include "mte_user_helper.h"
+#include "gcs-internal.h"
static abi_long do_prctl_sve_get_vl(CPUArchState *env)
{
@@ -206,4 +208,98 @@ static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env)
}
#define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl
+static abi_long do_prctl_get_shadow_stack_status(CPUArchState *env,
+ abi_long arg2)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (!cpu_isar_feature(aa64_gcs, cpu)) {
+ return -TARGET_EINVAL;
+ }
+ return put_user_ual(gcs_get_el0_mode(env), arg2);
+}
+#define do_prctl_get_shadow_stack_status do_prctl_get_shadow_stack_status
+
+static abi_long gcs_alloc(abi_ulong hint, abi_ulong size)
+{
+ /*
+ * Without softmmu, we cannot protect GCS memory properly.
+ * Make do with normal read/write permissions. This at least allows
+ * emulation of correct programs which don't access the gcs stack
+ * with normal instructions.
+ */
+ return target_mmap(hint, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS |
+ (hint ? MAP_FIXED_NOREPLACE : 0), -1, 0);
+}
+
+static abi_ulong gcs_new_stack(TaskState *ts)
+{
+ /* Use guest_stack_size as a proxy for RLIMIT_STACK. */
+ abi_ulong size = MIN(MAX(guest_stack_size / 2, TARGET_PAGE_SIZE), 2 * GiB);
+ abi_ulong base = gcs_alloc(0, size);
+
+ if (base == -1) {
+ return -1;
+ }
+
+ ts->gcs_base = base;
+ ts->gcs_size = size;
+ return base + size - 8;
+}
+
+static abi_long do_prctl_set_shadow_stack_status(CPUArchState *env,
+ abi_long new_mode)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ TaskState *ts = get_task_state(env_cpu(env));
+ abi_long cur_mode;
+
+ if (!cpu_isar_feature(aa64_gcs, cpu)) {
+ return -TARGET_EINVAL;
+ }
+ if (new_mode & ~(PR_SHADOW_STACK_ENABLE |
+ PR_SHADOW_STACK_WRITE |
+ PR_SHADOW_STACK_PUSH)) {
+ return -TARGET_EINVAL;
+ }
+
+ cur_mode = gcs_get_el0_mode(env);
+ if ((new_mode ^ cur_mode) & ts->gcs_el0_locked) {
+ return -TARGET_EBUSY;
+ }
+
+ if (new_mode & ~cur_mode & PR_SHADOW_STACK_ENABLE) {
+ abi_long gcspr;
+
+ if (ts->gcs_base || env->cp15.gcspr_el[0]) {
+ return -TARGET_EINVAL;
+ }
+ gcspr = gcs_new_stack(ts);
+ if (gcspr == -1) {
+ return -TARGET_ENOMEM;
+ }
+ env->cp15.gcspr_el[0] = gcspr;
+ }
+
+ gcs_set_el0_mode(env, new_mode);
+ arm_rebuild_hflags(env);
+ return 0;
+}
+#define do_prctl_set_shadow_stack_status do_prctl_set_shadow_stack_status
+
+static abi_long do_prctl_lock_shadow_stack_status(CPUArchState *env,
+ abi_long arg2)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ TaskState *ts = get_task_state(env_cpu(env));
+
+ if (!cpu_isar_feature(aa64_gcs, cpu)) {
+ return -TARGET_EINVAL;
+ }
+ ts->gcs_el0_locked |= arg2;
+ return 0;
+}
+#define do_prctl_lock_shadow_stack_status do_prctl_lock_shadow_stack_status
+
#endif /* AARCH64_TARGET_PRCTL_H */
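
do_prctl_lock_shadow_stack_status() gives PR_LOCK_SHADOW_STACK_STATUS the same semantics as the kernel: once a feature bit is recorded in ts->gcs_el0_locked, any later attempt to flip it fails with EBUSY (see the (new_mode ^ cur_mode) test above and the matching check in target_restore_gcs_record()). A guest-side sketch of that behaviour, with the prctl numbers again assumed from recent uapi headers:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/prctl.h>

    #ifndef PR_LOCK_SHADOW_STACK_STATUS   /* assumed values, recent uapi */
    #define PR_SET_SHADOW_STACK_STATUS   75
    #define PR_LOCK_SHADOW_STACK_STATUS  76
    #define PR_SHADOW_STACK_WRITE        (1UL << 1)
    #endif

    int main(void)
    {
        /* Lock the WRITE bit at its current (disabled) value. */
        if (prctl(PR_LOCK_SHADOW_STACK_STATUS, PR_SHADOW_STACK_WRITE, 0, 0, 0)) {
            perror("PR_LOCK_SHADOW_STACK_STATUS");
            return 1;
        }
        /* Flipping a locked bit must now fail with EBUSY. */
        if (prctl(PR_SET_SHADOW_STACK_STATUS, PR_SHADOW_STACK_WRITE, 0, 0, 0)) {
            printf("set after lock: %s (EBUSY expected)\n", strerror(errno));
        }
        return 0;
    }
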
diff --git a/linux-user/aarch64/target_ptrace.h b/linux-user/aarch64/target_ptrace.h
new file mode 100644
index 0000000..1068133
--- /dev/null
+++ b/linux-user/aarch64/target_ptrace.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef AARCH64_TARGET_PTRACE_H
+#define AARCH64_TARGET_PTRACE_H
+
+/* See arch/arm64/include/uapi/asm/ptrace.h. */
+struct target_user_pt_regs {
+ uint64_t regs[31];
+ uint64_t sp;
+ uint64_t pc;
+ uint64_t pstate;
+};
+
+#endif /* AARCH64_TARGET_PTRACE_H */
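
This layout is load-bearing for core dumps: as the comment in target_elf.h notes, the kernel defines elf_gregset_t via sizeof(struct user_pt_regs), so the NT_PRSTATUS register data must be exactly 31 GPRs plus sp, pc and pstate, i.e. 34 u64s = 272 bytes (the same struct is deleted from target_syscall.h further down). A compile-time check one could keep next to the struct, as a sketch rather than part of the patch:

    #include <stdint.h>

    struct target_user_pt_regs {
        uint64_t regs[31];
        uint64_t sp;
        uint64_t pc;
        uint64_t pstate;
    };

    /* NT_PRSTATUS register data for AArch64 is exactly 34 * 8 = 272 bytes. */
    _Static_assert(sizeof(struct target_user_pt_regs) == 34 * 8,
                   "user_pt_regs layout mismatch");
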
diff --git a/linux-user/aarch64/target_signal.h b/linux-user/aarch64/target_signal.h
index 6f66a50..e509ac1 100644
--- a/linux-user/aarch64/target_signal.h
+++ b/linux-user/aarch64/target_signal.h
@@ -7,6 +7,7 @@
#define TARGET_SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
#define TARGET_SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
+#define TARGET_SEGV_CPERR 10 /* Control protection fault */
#define TARGET_ARCH_HAS_SETUP_FRAME
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
diff --git a/linux-user/aarch64/target_syscall.h b/linux-user/aarch64/target_syscall.h
index c055133..bd05f6c 100644
--- a/linux-user/aarch64/target_syscall.h
+++ b/linux-user/aarch64/target_syscall.h
@@ -1,13 +1,6 @@
#ifndef AARCH64_TARGET_SYSCALL_H
#define AARCH64_TARGET_SYSCALL_H
-struct target_pt_regs {
- uint64_t regs[31];
- uint64_t sp;
- uint64_t pc;
- uint64_t pstate;
-};
-
#if TARGET_BIG_ENDIAN
#define UNAME_MACHINE "aarch64_be"
#else
diff --git a/linux-user/aarch64/vdso-be.so b/linux-user/aarch64/vdso-be.so
index d43c3b1..4089838 100755
--- a/linux-user/aarch64/vdso-be.so
+++ b/linux-user/aarch64/vdso-be.so
Binary files differ
diff --git a/linux-user/aarch64/vdso-le.so b/linux-user/aarch64/vdso-le.so
index aaedc9d..2408028 100755
--- a/linux-user/aarch64/vdso-le.so
+++ b/linux-user/aarch64/vdso-le.so
Binary files differ
diff --git a/linux-user/aarch64/vdso.S b/linux-user/aarch64/vdso.S
index a0ac148..59dd94d 100644
--- a/linux-user/aarch64/vdso.S
+++ b/linux-user/aarch64/vdso.S
@@ -71,5 +71,7 @@ vdso_syscall __kernel_clock_getres, __NR_clock_getres
__kernel_rt_sigreturn:
/* No BTI C insn here -- we arrive via RET. */
mov x8, #__NR_rt_sigreturn
+sigreturn_region_start:
svc #0
+sigreturn_region_end:
endf __kernel_rt_sigreturn