Diffstat (limited to 'target/arm')
-rw-r--r--  target/arm/arch_dump.c | 6
-rw-r--r--  target/arm/arm-qmp-cmds.c | 7
-rw-r--r--  target/arm/cpregs-pmu.c | 1309
-rw-r--r--  target/arm/cpregs.h | 4
-rw-r--r--  target/arm/cpu-features.h | 381
-rw-r--r--  target/arm/cpu-param.h | 18
-rw-r--r--  target/arm/cpu-qom.h | 5
-rw-r--r--  target/arm/cpu-sysregs.h | 42
-rw-r--r--  target/arm/cpu-sysregs.h.inc | 40
-rw-r--r--  target/arm/cpu.c | 315
-rw-r--r--  target/arm/cpu.h | 249
-rw-r--r--  target/arm/cpu32-stubs.c | 26
-rw-r--r--  target/arm/cpu64.c | 238
-rw-r--r--  target/arm/debug_helper.c | 8
-rw-r--r--  target/arm/gdbstub.c | 12
-rw-r--r--  target/arm/gdbstub64.c | 4
-rw-r--r--  target/arm/helper.c | 2024
-rw-r--r--  target/arm/helper.h | 1152
-rw-r--r--  target/arm/hvf-stub.c | 20
-rw-r--r--  target/arm/hvf/hvf.c | 106
-rw-r--r--  target/arm/hvf/trace-events | 5
-rw-r--r--  target/arm/hvf_arm.h | 18
-rw-r--r--  target/arm/hyp_gdbstub.c | 14
-rw-r--r--  target/arm/internals.h | 52
-rw-r--r--  target/arm/kvm-stub.c | 102
-rw-r--r--  target/arm/kvm.c | 268
-rw-r--r--  target/arm/kvm_arm.h | 102
-rw-r--r--  target/arm/machine.c | 37
-rw-r--r--  target/arm/meson.build | 47
-rw-r--r--  target/arm/ptw.c | 81
-rw-r--r--  target/arm/syndrome.h | 1
-rw-r--r--  target/arm/tcg-stubs.c | 9
-rw-r--r--  target/arm/tcg/arith_helper.c | 5
-rw-r--r--  target/arm/tcg/cpregs-at.c | 519
-rw-r--r--  target/arm/tcg/cpu-v7m.c | 201
-rw-r--r--  target/arm/tcg/cpu32.c | 354
-rw-r--r--  target/arm/tcg/cpu64.c | 525
-rw-r--r--  target/arm/tcg/crypto_helper.c | 6
-rw-r--r--  target/arm/tcg/gengvec64.c | 11
-rw-r--r--  target/arm/tcg/helper-a64.c | 29
-rw-r--r--  target/arm/tcg/helper-sme.h | 213
-rw-r--r--  target/arm/tcg/helper-sve.h | 212
-rw-r--r--  target/arm/tcg/helper.h | 1213
-rw-r--r--  target/arm/tcg/hflags.c | 159
-rw-r--r--  target/arm/tcg/iwmmxt_helper.c | 4
-rw-r--r--  target/arm/tcg/m_helper.c | 36
-rw-r--r--  target/arm/tcg/meson.build | 32
-rw-r--r--  target/arm/tcg/mte_helper.c | 11
-rw-r--r--  target/arm/tcg/mve_helper.c | 186
-rw-r--r--  target/arm/tcg/neon_helper.c | 34
-rw-r--r--  target/arm/tcg/op_helper.c | 7
-rw-r--r--  target/arm/tcg/pauth_helper.c | 3
-rw-r--r--  target/arm/tcg/sme.decode | 937
-rw-r--r--  target/arm/tcg/sme_helper.c | 1676
-rw-r--r--  target/arm/tcg/sve.decode | 327
-rw-r--r--  target/arm/tcg/sve_helper.c | 1197
-rw-r--r--  target/arm/tcg/sve_ldst_internal.h | 91
-rw-r--r--  target/arm/tcg/tlb-insns.c | 8
-rw-r--r--  target/arm/tcg/tlb_helper.c | 6
-rw-r--r--  target/arm/tcg/translate-a64.c | 70
-rw-r--r--  target/arm/tcg/translate-a64.h | 10
-rw-r--r--  target/arm/tcg/translate-neon.c | 18
-rw-r--r--  target/arm/tcg/translate-sme.c | 1476
-rw-r--r--  target/arm/tcg/translate-sve.c | 1023
-rw-r--r--  target/arm/tcg/translate.c | 25
-rw-r--r--  target/arm/tcg/translate.h | 11
-rw-r--r--  target/arm/tcg/vec_helper.c | 383
-rw-r--r--  target/arm/tcg/vec_internal.h | 150
-rw-r--r--  target/arm/tcg/vfp_helper.c | 16
69 files changed, 12451 insertions, 5435 deletions
diff --git a/target/arm/arch_dump.c b/target/arm/arch_dump.c
index c40df4e..1dd7984 100644
--- a/target/arm/arch_dump.c
+++ b/target/arm/arch_dump.c
@@ -143,7 +143,6 @@ static int aarch64_write_elf64_prfpreg(WriteCoreDumpFunction f,
return 0;
}
-#ifdef TARGET_AARCH64
static off_t sve_zreg_offset(uint32_t vq, int n)
{
off_t off = sizeof(struct aarch64_user_sve_header);
@@ -231,7 +230,6 @@ static int aarch64_write_elf64_sve(WriteCoreDumpFunction f,
return 0;
}
-#endif
int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, DumpState *s)
@@ -273,11 +271,9 @@ int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
return ret;
}
-#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_sve, cpu)) {
ret = aarch64_write_elf64_sve(f, env, cpuid, s);
}
-#endif
return ret;
}
@@ -451,11 +447,9 @@ ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
if (class == ELFCLASS64) {
note_size = AARCH64_PRSTATUS_NOTE_SIZE;
note_size += AARCH64_PRFPREG_NOTE_SIZE;
-#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_sve, cpu)) {
note_size += AARCH64_SVE_NOTE_SIZE(&cpu->env);
}
-#endif
} else {
note_size = ARM_PRSTATUS_NOTE_SIZE;
if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
diff --git a/target/arm/arm-qmp-cmds.c b/target/arm/arm-qmp-cmds.c
index 883c0a0..cefd235 100644
--- a/target/arm/arm-qmp-cmds.c
+++ b/target/arm/arm-qmp-cmds.c
@@ -26,10 +26,11 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qapi/qobject-input-visitor.h"
-#include "qapi/qapi-commands-machine-target.h"
-#include "qapi/qapi-commands-misc-target.h"
+#include "qapi/qapi-commands-machine.h"
+#include "qapi/qapi-commands-misc-arm.h"
#include "qobject/qdict.h"
#include "qom/qom-qobject.h"
+#include "cpu.h"
static GICCapability *gic_cap_new(int version)
{
@@ -46,7 +47,7 @@ static inline void gic_cap_kvm_probe(GICCapability *v2, GICCapability *v3)
#ifdef CONFIG_KVM
int fdarray[3];
- if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, NULL)) {
+ if (!kvm_arm_create_scratch_host_vcpu(fdarray, NULL)) {
return;
}
diff --git a/target/arm/cpregs-pmu.c b/target/arm/cpregs-pmu.c
new file mode 100644
index 0000000..0f295b1
--- /dev/null
+++ b/target/arm/cpregs-pmu.c
@@ -0,0 +1,1309 @@
+/*
+ * QEMU ARM CP Register PMU insns
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+#include "exec/icount.h"
+#include "hw/irq.h"
+#include "cpu.h"
+#include "cpu-features.h"
+#include "cpregs.h"
+#include "internals.h"
+
+
+#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
+
+/*
+ * Check for traps to performance monitor registers, which are controlled
+ * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
+ */
+static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+ uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
+
+ if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+typedef struct pm_event {
+ uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
+ /* If the event is supported on this CPU (used to generate PMCEID[01]) */
+ bool (*supported)(CPUARMState *);
+ /*
+ * Retrieve the current count of the underlying event. The programmed
+ * counters hold a difference from the return value from this function
+ */
+ uint64_t (*get_count)(CPUARMState *);
+ /*
+ * Return how many nanoseconds it will take (at a minimum) for count events
+ * to occur. A negative value indicates the counter will never overflow, or
+ * that the counter has otherwise arranged for the overflow bit to be set
+ * and the PMU interrupt to be raised on overflow.
+ */
+ int64_t (*ns_per_count)(uint64_t);
+} pm_event;
+
+static bool event_always_supported(CPUARMState *env)
+{
+ return true;
+}
+
+static uint64_t swinc_get_count(CPUARMState *env)
+{
+ /*
+ * SW_INCR events are written directly to the pmevcntr registers by writes
+ * PMSWINC, so there is no underlying count maintained by the PMU itself
+ */
+ return 0;
+}
+
+static int64_t swinc_ns_per(uint64_t ignored)
+{
+ return -1;
+}
+
+/*
+ * Return the underlying cycle count for the PMU cycle counters. In
+ * usermode there is no QEMU_CLOCK_VIRTUAL, so return the host tick count.
+ */
+static uint64_t cycles_get_count(CPUARMState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
+#else
+ return cpu_get_host_ticks();
+#endif
+}
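+
+/*
+ * Note that with ARM_CPU_FREQ fixed at 1 GHz the muldiv64() above is
+ * effectively an identity mapping: one QEMU_CLOCK_VIRTUAL nanosecond
+ * per CPU cycle.
+ */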
+
+#ifndef CONFIG_USER_ONLY
+static int64_t cycles_ns_per(uint64_t cycles)
+{
+ return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
+}
+
+static bool instructions_supported(CPUARMState *env)
+{
+ /* Precise instruction counting */
+ return icount_enabled() == ICOUNT_PRECISE;
+}
+
+static uint64_t instructions_get_count(CPUARMState *env)
+{
+ assert(icount_enabled() == ICOUNT_PRECISE);
+ return (uint64_t)icount_get_raw();
+}
+
+static int64_t instructions_ns_per(uint64_t icount)
+{
+ assert(icount_enabled() == ICOUNT_PRECISE);
+ return icount_to_ns((int64_t)icount);
+}
+#endif
+
+static bool pmuv3p1_events_supported(CPUARMState *env)
+{
+ /* For events which are supported in any v8.1 PMU */
+ return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
+}
+
+static bool pmuv3p4_events_supported(CPUARMState *env)
+{
+ /* For events which are supported in any v8.4 PMU */
+ return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
+}
+
+static uint64_t zero_event_get_count(CPUARMState *env)
+{
+ /* For events which on QEMU never fire, so their count is always zero */
+ return 0;
+}
+
+static int64_t zero_event_ns_per(uint64_t cycles)
+{
+ /* An event which never fires can never overflow */
+ return -1;
+}
+
+static const pm_event pm_events[] = {
+ { .number = 0x000, /* SW_INCR */
+ .supported = event_always_supported,
+ .get_count = swinc_get_count,
+ .ns_per_count = swinc_ns_per,
+ },
+#ifndef CONFIG_USER_ONLY
+ { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
+ .supported = instructions_supported,
+ .get_count = instructions_get_count,
+ .ns_per_count = instructions_ns_per,
+ },
+ { .number = 0x011, /* CPU_CYCLES, Cycle */
+ .supported = event_always_supported,
+ .get_count = cycles_get_count,
+ .ns_per_count = cycles_ns_per,
+ },
+#endif
+ { .number = 0x023, /* STALL_FRONTEND */
+ .supported = pmuv3p1_events_supported,
+ .get_count = zero_event_get_count,
+ .ns_per_count = zero_event_ns_per,
+ },
+ { .number = 0x024, /* STALL_BACKEND */
+ .supported = pmuv3p1_events_supported,
+ .get_count = zero_event_get_count,
+ .ns_per_count = zero_event_ns_per,
+ },
+ { .number = 0x03c, /* STALL */
+ .supported = pmuv3p4_events_supported,
+ .get_count = zero_event_get_count,
+ .ns_per_count = zero_event_ns_per,
+ },
+};
+
+/*
+ * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
+ * events (i.e. the statistical profiling extension), this implementation
+ * should first be updated to something sparse instead of the current
+ * supported_event_map[] array.
+ */
+#define MAX_EVENT_ID 0x3c
+#define UNSUPPORTED_EVENT UINT16_MAX
+static uint16_t supported_event_map[MAX_EVENT_ID + 1];
+
+/*
+ * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
+ * of ARM event numbers to indices in our pm_events array.
+ *
+ * Note: Events in the 0x40XX range are not currently supported.
+ */
+void pmu_init(ARMCPU *cpu)
+{
+ unsigned int i;
+
+ /*
+ * Empty supported_event_map and cpu->pmceid[01] before adding supported
+ * events to them
+ */
+ for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
+ supported_event_map[i] = UNSUPPORTED_EVENT;
+ }
+ cpu->pmceid0 = 0;
+ cpu->pmceid1 = 0;
+
+ for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
+ const pm_event *cnt = &pm_events[i];
+ assert(cnt->number <= MAX_EVENT_ID);
+ /* We do not currently support events in the 0x40xx range */
+ assert(cnt->number <= 0x3f);
+
+ if (cnt->supported(&cpu->env)) {
+ supported_event_map[cnt->number] = i;
+ uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
+ if (cnt->number & 0x20) {
+ cpu->pmceid1 |= event_mask;
+ } else {
+ cpu->pmceid0 |= event_mask;
+ }
+ }
+ }
+}
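+
+/*
+ * For example, with the default event list above, CPU_CYCLES (0x011)
+ * sets bit 17 of PMCEID0, while STALL (0x03c, when FEAT_PMUv3p4 is
+ * present) sets bit 28 of PMCEID1, since events 0x20..0x3f are
+ * reported in PMCEID1.
+ */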
+
+/*
+ * Check at runtime whether a PMU event is supported for the current machine
+ */
+static bool event_supported(uint16_t number)
+{
+ if (number > MAX_EVENT_ID) {
+ return false;
+ }
+ return supported_event_map[number] != UNSUPPORTED_EVENT;
+}
+
+static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /*
+ * Performance monitor registers user accessibility is controlled
+ * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
+ * trapping to EL2 or EL3 for other accesses.
+ */
+ int el = arm_current_el(env);
+ uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
+
+ if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
+ return CP_ACCESS_TRAP_EL1;
+ }
+ if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* ER: event counter read trap control */
+ if (arm_feature(env, ARM_FEATURE_V8)
+ && arm_current_el(env) == 0
+ && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
+ && isread) {
+ return CP_ACCESS_OK;
+ }
+
+ return pmreg_access(env, ri, isread);
+}
+
+static CPAccessResult pmreg_access_swinc(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* SW: software increment write trap control */
+ if (arm_feature(env, ARM_FEATURE_V8)
+ && arm_current_el(env) == 0
+ && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
+ && !isread) {
+ return CP_ACCESS_OK;
+ }
+
+ return pmreg_access(env, ri, isread);
+}
+
+static CPAccessResult pmreg_access_selr(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* ER: event counter read trap control */
+ if (arm_feature(env, ARM_FEATURE_V8)
+ && arm_current_el(env) == 0
+ && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
+ return CP_ACCESS_OK;
+ }
+
+ return pmreg_access(env, ri, isread);
+}
+
+static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* CR: cycle counter read trap control */
+ if (arm_feature(env, ARM_FEATURE_V8)
+ && arm_current_el(env) == 0
+ && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
+ && isread) {
+ return CP_ACCESS_OK;
+ }
+
+ return pmreg_access(env, ri, isread);
+}
+
+/*
+ * Returns true if the counter (pass 31 for PMCCNTR) should count events using
+ * the current EL, security state, and register configuration.
+ */
+static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
+{
+ uint64_t filter;
+ bool e, p, u, nsk, nsu, nsh, m;
+ bool enabled, prohibited = false, filtered;
+ bool secure = arm_is_secure(env);
+ int el = arm_current_el(env);
+ uint64_t mdcr_el2;
+ uint8_t hpmn;
+
+ /*
+ * We might be called for M-profile cores where MDCR_EL2 doesn't
+ * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
+ * must be before we read that value.
+ */
+ if (!arm_feature(env, ARM_FEATURE_PMU)) {
+ return false;
+ }
+
+ mdcr_el2 = arm_mdcr_el2_eff(env);
+ hpmn = mdcr_el2 & MDCR_HPMN;
+
+ if (!arm_feature(env, ARM_FEATURE_EL2) ||
+ (counter < hpmn || counter == 31)) {
+ e = env->cp15.c9_pmcr & PMCRE;
+ } else {
+ e = mdcr_el2 & MDCR_HPME;
+ }
+ enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
+
+ /* Is event counting prohibited? */
+ if (el == 2 && (counter < hpmn || counter == 31)) {
+ prohibited = mdcr_el2 & MDCR_HPMD;
+ }
+ if (secure) {
+ prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
+ }
+
+ if (counter == 31) {
+ /*
+ * The cycle counter defaults to running. PMCR.DP says "disable
+ * the cycle counter when event counting is prohibited".
+ * Some MDCR bits disable the cycle counter specifically.
+ */
+ prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
+ if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+ if (secure) {
+ prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
+ }
+ if (el == 2) {
+ prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
+ }
+ }
+ }
+
+ if (counter == 31) {
+ filter = env->cp15.pmccfiltr_el0;
+ } else {
+ filter = env->cp15.c14_pmevtyper[counter];
+ }
+
+ p = filter & PMXEVTYPER_P;
+ u = filter & PMXEVTYPER_U;
+ nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
+ nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
+ nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
+ m = arm_el_is_aa64(env, 1) &&
+ arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
+
+ if (el == 0) {
+ filtered = secure ? u : u != nsu;
+ } else if (el == 1) {
+ filtered = secure ? p : p != nsk;
+ } else if (el == 2) {
+ filtered = !nsh;
+ } else { /* EL3 */
+ filtered = m != p;
+ }
+
+ if (counter != 31) {
+ /*
+ * If not checking PMCCNTR, ensure the counter is set up to an event we
+ * support
+ */
+ uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
+ if (!event_supported(event)) {
+ return false;
+ }
+ }
+
+ return enabled && !prohibited && !filtered;
+}
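+
+/*
+ * For example, with EL3 implemented, the filter logic above means an
+ * event is counted at Non-secure EL0 only while PMEVTYPER.U equals
+ * PMEVTYPER.NSU, and at Non-secure EL1 only while PMEVTYPER.P equals
+ * PMEVTYPER.NSK; in Secure state the U and P bits alone exclude EL0
+ * and EL1 respectively.
+ */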
+
+static void pmu_update_irq(CPUARMState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
+ (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
+}
+
+static bool pmccntr_clockdiv_enabled(CPUARMState *env)
+{
+ /*
+ * Return true if the clock divider is enabled and the cycle counter
+ * is supposed to tick only once every 64 clock cycles. This is
+ * controlled by PMCR.D, but if PMCR.LC is set to enable the long
+ * (64-bit) cycle counter PMCR.D has no effect.
+ */
+ return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
+}
+
+static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
+{
+ /* Return true if the specified event counter is configured to be 64 bit */
+
+ /* This isn't intended to be used with the cycle counter */
+ assert(counter < 31);
+
+ if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+ return false;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ /*
+ * MDCR_EL2.HLP still applies even when EL2 is disabled in the
+ * current security state, so we don't use arm_mdcr_el2_eff() here.
+ */
+ bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
+ int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
+
+ if (counter >= hpmn) {
+ return hlp;
+ }
+ }
+ return env->cp15.c9_pmcr & PMCRLP;
+}
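+
+/*
+ * For example, with MDCR_EL2.HPMN = 4 on a CPU with FEAT_PMUv3p5 and
+ * EL2, counters 4..30 use MDCR_EL2.HLP to select 64-bit overflow
+ * behaviour, while counters 0..3 (those left visible to the guest by
+ * HPMN) use PMCR.LP instead.
+ */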
+
+/*
+ * Ensure c15_ccnt is the guest-visible count so that operations such as
+ * enabling/disabling the counter or filtering, modifying the count itself,
+ * etc. can be done logically. This is essentially a no-op if the counter is
+ * not enabled at the time of the call.
+ */
+static void pmccntr_op_start(CPUARMState *env)
+{
+ uint64_t cycles = cycles_get_count(env);
+
+ if (pmu_counter_enabled(env, 31)) {
+ uint64_t eff_cycles = cycles;
+ if (pmccntr_clockdiv_enabled(env)) {
+ eff_cycles /= 64;
+ }
+
+ uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
+
+ uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
+ 1ull << 63 : 1ull << 31;
+ if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
+ env->cp15.c9_pmovsr |= (1ULL << 31);
+ pmu_update_irq(env);
+ }
+
+ env->cp15.c15_ccnt = new_pmccntr;
+ }
+ env->cp15.c15_ccnt_delta = cycles;
+}
+
+/*
+ * If PMCCNTR is enabled, recalculate the delta between the clock and the
+ * guest-visible count. A call to pmccntr_op_finish should follow every call to
+ * pmccntr_op_start.
+ */
+static void pmccntr_op_finish(CPUARMState *env)
+{
+ if (pmu_counter_enabled(env, 31)) {
+#ifndef CONFIG_USER_ONLY
+ /* Calculate when the counter will next overflow */
+ uint64_t remaining_cycles = -env->cp15.c15_ccnt;
+ if (!(env->cp15.c9_pmcr & PMCRLC)) {
+ remaining_cycles = (uint32_t)remaining_cycles;
+ }
+ int64_t overflow_in = cycles_ns_per(remaining_cycles);
+
+ if (overflow_in > 0) {
+ int64_t overflow_at;
+
+ if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ overflow_in, &overflow_at)) {
+ ARMCPU *cpu = env_archcpu(env);
+ timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+ }
+ }
+#endif
+
+ uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
+ if (pmccntr_clockdiv_enabled(env)) {
+ prev_cycles /= 64;
+ }
+ env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
+ }
+}
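+
+/*
+ * Worked example of the delta scheme: a guest write of PMCCNTR = 100
+ * while the underlying clock reads 1000 cycles leaves c15_ccnt = 100
+ * and, via pmccntr_op_finish(), c15_ccnt_delta = 1000 - 100 = 900. A
+ * read once the clock reaches 1500 cycles then recomputes
+ * 1500 - 900 = 600: the written value plus the 500 cycles elapsed.
+ */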
+
+static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
+{
+ uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
+ uint64_t count = 0;
+ if (event_supported(event)) {
+ uint16_t event_idx = supported_event_map[event];
+ count = pm_events[event_idx].get_count(env);
+ }
+
+ if (pmu_counter_enabled(env, counter)) {
+ uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
+ uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
+ 1ULL << 63 : 1ULL << 31;
+
+ if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
+ env->cp15.c9_pmovsr |= (1 << counter);
+ pmu_update_irq(env);
+ }
+ env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
+ }
+ env->cp15.c14_pmevcntr_delta[counter] = count;
+}
+
+static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
+{
+ if (pmu_counter_enabled(env, counter)) {
+#ifndef CONFIG_USER_ONLY
+ uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
+ uint16_t event_idx = supported_event_map[event];
+ uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
+ int64_t overflow_in;
+
+ if (!pmevcntr_is_64_bit(env, counter)) {
+ delta = (uint32_t)delta;
+ }
+ overflow_in = pm_events[event_idx].ns_per_count(delta);
+
+ if (overflow_in > 0) {
+ int64_t overflow_at;
+
+ if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ overflow_in, &overflow_at)) {
+ ARMCPU *cpu = env_archcpu(env);
+ timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+ }
+ }
+#endif
+
+ env->cp15.c14_pmevcntr_delta[counter] -=
+ env->cp15.c14_pmevcntr[counter];
+ }
+}
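+
+/*
+ * As a concrete case of the arithmetic above: a 32-bit event counter
+ * holding 0xfffffff0 gives delta = (uint32_t)-(0xfffffff0 + 1) = 15,
+ * so the pmu_timer is armed ns_per_count(15) nanoseconds ahead,
+ * provided the event's rate is predictable (ns_per_count() > 0).
+ */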
+
+void pmu_op_start(CPUARMState *env)
+{
+ unsigned int i;
+ pmccntr_op_start(env);
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ pmevcntr_op_start(env, i);
+ }
+}
+
+void pmu_op_finish(CPUARMState *env)
+{
+ unsigned int i;
+ pmccntr_op_finish(env);
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ pmevcntr_op_finish(env, i);
+ }
+}
+
+void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
+{
+ pmu_op_start(&cpu->env);
+}
+
+void pmu_post_el_change(ARMCPU *cpu, void *ignored)
+{
+ pmu_op_finish(&cpu->env);
+}
+
+void arm_pmu_timer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ /*
+ * Update all the counter values based on the current underlying counts,
+ * triggering interrupts to be raised, if necessary. pmu_op_finish() also
+ * has the effect of setting the cpu->pmu_timer to the next earliest time a
+ * counter may expire.
+ */
+ pmu_op_start(&cpu->env);
+ pmu_op_finish(&cpu->env);
+}
+
+static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmu_op_start(env);
+
+ if (value & PMCRC) {
+ /* The counter has been reset */
+ env->cp15.c15_ccnt = 0;
+ }
+
+ if (value & PMCRP) {
+ unsigned int i;
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ env->cp15.c14_pmevcntr[i] = 0;
+ }
+ }
+
+ env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
+ env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);
+
+ pmu_op_finish(env);
+}
+
+static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint64_t pmcr = env->cp15.c9_pmcr;
+
+ /*
+ * If EL2 is implemented and enabled for the current security state, reads
+ * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
+ */
+ if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
+ pmcr &= ~PMCRN_MASK;
+ pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
+ }
+
+ return pmcr;
+}
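+
+/*
+ * For example, with MDCR_EL2.HPMN = 2 on a CPU exposing four counters,
+ * a PMCR read from EL1 or EL0 (with EL2 enabled) reports N = 2, hiding
+ * the two counters reserved for EL2; reads from EL2 or EL3 still report
+ * the full count.
+ */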
+
+static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ unsigned int i;
+ uint64_t overflow_mask, new_pmswinc;
+
+ for (i = 0; i < pmu_num_counters(env); i++) {
+ /* Increment a counter's count iff: */
+ if ((value & (1 << i)) && /* counter's bit is set */
+ /* counter is enabled and not filtered */
+ pmu_counter_enabled(env, i) &&
+ /* counter is SW_INCR */
+ (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
+ pmevcntr_op_start(env, i);
+
+ /*
+ * Detect if this write causes an overflow since we can't predict
+ * PMSWINC overflows like we can for other events
+ */
+ new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
+
+ overflow_mask = pmevcntr_is_64_bit(env, i) ?
+ 1ULL << 63 : 1ULL << 31;
+
+ if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
+ env->cp15.c9_pmovsr |= (1 << i);
+ pmu_update_irq(env);
+ }
+
+ env->cp15.c14_pmevcntr[i] = new_pmswinc;
+
+ pmevcntr_op_finish(env, i);
+ }
+ }
+}
+
+static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint64_t ret;
+ pmccntr_op_start(env);
+ ret = env->cp15.c15_ccnt;
+ pmccntr_op_finish(env);
+ return ret;
+}
+
+static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /*
+ * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
+ * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; PMSELR.SEL
+ * is then checked when PMXEVTYPER and PMXEVCNTR are actually accessed.
+ */
+ env->cp15.c9_pmselr = value & 0x1f;
+}
+
+static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmccntr_op_start(env);
+ env->cp15.c15_ccnt = value;
+ pmccntr_op_finish(env);
+}
+
+static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint64_t cur_val = pmccntr_read(env, NULL);
+
+ pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
+}
+
+static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmccntr_op_start(env);
+ env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
+ pmccntr_op_finish(env);
+}
+
+static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmccntr_op_start(env);
+ /* M is not accessible from AArch32 */
+ env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
+ (value & PMCCFILTR);
+ pmccntr_op_finish(env);
+}
+
+static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* M is not visible in AArch32 */
+ return env->cp15.pmccfiltr_el0 & PMCCFILTR;
+}
+
+static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmu_op_start(env);
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pmcnten |= value;
+ pmu_op_finish(env);
+}
+
+static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmu_op_start(env);
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pmcnten &= ~value;
+ pmu_op_finish(env);
+}
+
+static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pmovsr &= ~value;
+ pmu_update_irq(env);
+}
+
+static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pmovsr |= value;
+ pmu_update_irq(env);
+}
+
+static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value, const uint8_t counter)
+{
+ if (counter == 31) {
+ pmccfiltr_write(env, ri, value);
+ } else if (counter < pmu_num_counters(env)) {
+ pmevcntr_op_start(env, counter);
+
+ /*
+ * If this counter's event type is changing, store the current
+ * underlying count for the new type in c14_pmevcntr_delta[counter] so
+ * pmevcntr_op_finish has the correct baseline when it converts back to
+ * a delta.
+ */
+ uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
+ PMXEVTYPER_EVTCOUNT;
+ uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
+ if (old_event != new_event) {
+ uint64_t count = 0;
+ if (event_supported(new_event)) {
+ uint16_t event_idx = supported_event_map[new_event];
+ count = pm_events[event_idx].get_count(env);
+ }
+ env->cp15.c14_pmevcntr_delta[counter] = count;
+ }
+
+ env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
+ pmevcntr_op_finish(env, counter);
+ }
+ /*
+ * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
+ * PMSELR value is equal to or greater than the number of implemented
+ * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
+ */
+}
+
+static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ const uint8_t counter)
+{
+ if (counter == 31) {
+ return env->cp15.pmccfiltr_el0;
+ } else if (counter < pmu_num_counters(env)) {
+ return env->cp15.c14_pmevtyper[counter];
+ } else {
+ /*
+ * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
+ * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
+ */
+ return 0;
+ }
+}
+
+static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ pmevtyper_write(env, ri, value, counter);
+}
+
+static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ env->cp15.c14_pmevtyper[counter] = value;
+
+ /*
+ * pmevtyper_rawwrite is called between a pair of pmu_op_start and
+ * pmu_op_finish calls when loading saved state for a migration. Because
+ * we're potentially updating the type of event here, the value written to
+ * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
+ * different counter type. Therefore, we need to set this value to the
+ * current count for the counter type we're writing so that pmu_op_finish
+ * has the correct count for its calculation.
+ */
+ uint16_t event = value & PMXEVTYPER_EVTCOUNT;
+ if (event_supported(event)) {
+ uint16_t event_idx = supported_event_map[event];
+ env->cp15.c14_pmevcntr_delta[counter] =
+ pm_events[event_idx].get_count(env);
+ }
+}
+
+static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ return pmevtyper_read(env, ri, counter);
+}
+
+static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
+}
+
+static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
+}
+
+static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value, uint8_t counter)
+{
+ if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+ /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
+ value &= MAKE_64BIT_MASK(0, 32);
+ }
+ if (counter < pmu_num_counters(env)) {
+ pmevcntr_op_start(env, counter);
+ env->cp15.c14_pmevcntr[counter] = value;
+ pmevcntr_op_finish(env, counter);
+ }
+ /*
+ * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
+ * are CONSTRAINED UNPREDICTABLE.
+ */
+}
+
+static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint8_t counter)
+{
+ if (counter < pmu_num_counters(env)) {
+ uint64_t ret;
+ pmevcntr_op_start(env, counter);
+ ret = env->cp15.c14_pmevcntr[counter];
+ pmevcntr_op_finish(env, counter);
+ if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+ /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
+ ret &= MAKE_64BIT_MASK(0, 32);
+ }
+ return ret;
+ } else {
+ /*
+ * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
+ * are CONSTRAINED UNPREDICTABLE.
+ */
+ return 0;
+ }
+}
+
+static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ pmevcntr_write(env, ri, value, counter);
+}
+
+static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ return pmevcntr_read(env, ri, counter);
+}
+
+static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ assert(counter < pmu_num_counters(env));
+ env->cp15.c14_pmevcntr[counter] = value;
+ pmevcntr_write(env, ri, value, counter);
+}
+
+static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
+ assert(counter < pmu_num_counters(env));
+ return env->cp15.c14_pmevcntr[counter];
+}
+
+static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
+}
+
+static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
+}
+
+static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ env->cp15.c9_pmuserenr = value & 0xf;
+ } else {
+ env->cp15.c9_pmuserenr = value & 1;
+ }
+}
+
+static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Only bits for implemented counters and the C bit can be set */
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pminten |= value;
+ pmu_update_irq(env);
+}
+
+static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= pmu_counter_mask(env);
+ env->cp15.c9_pminten &= ~value;
+ pmu_update_irq(env);
+}
+
+static const ARMCPRegInfo v7_pm_reginfo[] = {
+ /*
+ * Performance monitors are implementation defined in v7,
+ * but with an ARM recommended set of registers, which we
+ * follow.
+ *
+ * Performance registers fall into three categories:
+ * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
+ * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
+ * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
+ * For the cases controlled by PMUSERENR we must set .access to PL0_RW
+ * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
+ */
+ { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
+ .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
+ .writefn = pmcntenset_write,
+ .accessfn = pmreg_access,
+ .fgt = FGT_PMCNTEN,
+ .raw_writefn = raw_write },
+ { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMCNTEN,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
+ .writefn = pmcntenset_write, .raw_writefn = raw_write },
+ { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
+ .access = PL0_RW,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
+ .accessfn = pmreg_access,
+ .fgt = FGT_PMCNTEN,
+ .writefn = pmcntenclr_write, .raw_writefn = raw_write,
+ .type = ARM_CP_ALIAS | ARM_CP_IO },
+ { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMCNTEN,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
+ .writefn = pmcntenclr_write, .raw_writefn = raw_write },
+ { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
+ .access = PL0_RW, .type = ARM_CP_IO,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
+ .accessfn = pmreg_access,
+ .fgt = FGT_PMOVS,
+ .writefn = pmovsr_write,
+ .raw_writefn = raw_write },
+ { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMOVS,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
+ .writefn = pmovsr_write,
+ .raw_writefn = raw_write },
+ { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
+ .access = PL0_W, .accessfn = pmreg_access_swinc,
+ .fgt = FGT_PMSWINC_EL0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .writefn = pmswinc_write },
+ { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
+ .access = PL0_W, .accessfn = pmreg_access_swinc,
+ .fgt = FGT_PMSWINC_EL0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .writefn = pmswinc_write },
+ { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
+ .access = PL0_RW, .type = ARM_CP_ALIAS,
+ .fgt = FGT_PMSELR_EL0,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
+ .accessfn = pmreg_access_selr, .writefn = pmselr_write,
+ .raw_writefn = raw_write},
+ { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
+ .access = PL0_RW, .accessfn = pmreg_access_selr,
+ .fgt = FGT_PMSELR_EL0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
+ .writefn = pmselr_write, .raw_writefn = raw_write, },
+ { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
+ .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fgt = FGT_PMCCNTR_EL0,
+ .readfn = pmccntr_read, .writefn = pmccntr_write32,
+ .accessfn = pmreg_access_ccntr },
+ { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
+ .access = PL0_RW, .accessfn = pmreg_access_ccntr,
+ .fgt = FGT_PMCCNTR_EL0,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
+ .readfn = pmccntr_read, .writefn = pmccntr_write,
+ .raw_readfn = raw_read, .raw_writefn = raw_write, },
+ { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
+ .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMCCFILTR_EL0,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .resetvalue = 0, },
+ { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
+ .writefn = pmccfiltr_write, .raw_writefn = raw_write,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMCCFILTR_EL0,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
+ .resetvalue = 0, },
+ { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
+ .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = pmreg_access,
+ .fgt = FGT_PMEVTYPERN_EL0,
+ .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
+ { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
+ .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = pmreg_access,
+ .fgt = FGT_PMEVTYPERN_EL0,
+ .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
+ { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
+ .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = pmreg_access_xevcntr,
+ .fgt = FGT_PMEVCNTRN_EL0,
+ .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
+ { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
+ .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = pmreg_access_xevcntr,
+ .fgt = FGT_PMEVCNTRN_EL0,
+ .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
+ { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R | PL1_RW, .accessfn = access_tpm,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
+ .resetvalue = 0,
+ .writefn = pmuserenr_write, .raw_writefn = raw_write },
+ { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
+ .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
+ .resetvalue = 0,
+ .writefn = pmuserenr_write, .raw_writefn = raw_write },
+ { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tpm,
+ .fgt = FGT_PMINTEN,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
+ .resetvalue = 0,
+ .writefn = pmintenset_write, .raw_writefn = raw_write },
+ { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tpm,
+ .fgt = FGT_PMINTEN,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+ .writefn = pmintenset_write, .raw_writefn = raw_write,
+ .resetvalue = 0x0 },
+ { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tpm,
+ .fgt = FGT_PMINTEN,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+ .writefn = pmintenclr_write, .raw_writefn = raw_write },
+ { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tpm,
+ .fgt = FGT_PMINTEN,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+ .writefn = pmintenclr_write, .raw_writefn = raw_write },
+};
+
+static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
+ /* PMOVSSET is not implemented in v7 before v7ve */
+ { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMOVS,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
+ .writefn = pmovsset_write,
+ .raw_writefn = raw_write },
+ { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMOVS,
+ .type = ARM_CP_ALIAS | ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
+ .writefn = pmovsset_write,
+ .raw_writefn = raw_write },
+};
+
+void define_pm_cpregs(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+
+ if (arm_feature(env, ARM_FEATURE_V7)) {
+ /*
+ * v7 performance monitor control register: same implementor
+ * field as main ID register, and we implement four counters in
+ * addition to the cycle count register.
+ */
+ static const ARMCPRegInfo pmcr = {
+ .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
+ .access = PL0_RW,
+ .fgt = FGT_PMCR_EL0,
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
+ .accessfn = pmreg_access,
+ .readfn = pmcr_read, .raw_readfn = raw_read,
+ .writefn = pmcr_write, .raw_writefn = raw_write,
+ };
+ const ARMCPRegInfo pmcr64 = {
+ .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMCR_EL0,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
+ .resetvalue = cpu->isar.reset_pmcr_el0,
+ .readfn = pmcr_read, .raw_readfn = raw_read,
+ .writefn = pmcr_write, .raw_writefn = raw_write,
+ };
+
+ define_one_arm_cp_reg(cpu, &pmcr);
+ define_one_arm_cp_reg(cpu, &pmcr64);
+ define_arm_cp_regs(cpu, v7_pm_reginfo);
+
+ for (unsigned i = 0, pmcrn = pmu_num_counters(env); i < pmcrn; i++) {
+ g_autofree char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
+ g_autofree char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
+ g_autofree char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
+ g_autofree char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
+
+ ARMCPRegInfo pmev_regs[] = {
+ { .name = pmevcntr_name, .cp = 15, .crn = 14,
+ .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
+ .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .fgt = FGT_PMEVCNTRN_EL0,
+ .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
+ .accessfn = pmreg_access_xevcntr },
+ { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
+ .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
+ .type = ARM_CP_IO,
+ .fgt = FGT_PMEVCNTRN_EL0,
+ .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
+ .raw_readfn = pmevcntr_rawread,
+ .raw_writefn = pmevcntr_rawwrite },
+ { .name = pmevtyper_name, .cp = 15, .crn = 14,
+ .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
+ .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .fgt = FGT_PMEVTYPERN_EL0,
+ .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
+ .accessfn = pmreg_access },
+ { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
+ .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
+ .fgt = FGT_PMEVTYPERN_EL0,
+ .type = ARM_CP_IO,
+ .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
+ .raw_writefn = pmevtyper_rawwrite },
+ };
+ define_arm_cp_regs(cpu, pmev_regs);
+ }
+ }
+ if (arm_feature(env, ARM_FEATURE_V7VE)) {
+ define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ const ARMCPRegInfo v8_pm_reginfo[] = {
+ { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMCEIDN_EL0,
+ .resetvalue = extract64(cpu->pmceid0, 0, 32) },
+ { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMCEIDN_EL0,
+ .resetvalue = cpu->pmceid0 },
+ { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMCEIDN_EL0,
+ .resetvalue = extract64(cpu->pmceid1, 0, 32) },
+ { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMCEIDN_EL0,
+ .resetvalue = cpu->pmceid1 },
+ };
+ define_arm_cp_regs(cpu, v8_pm_reginfo);
+ }
+
+ if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
+ ARMCPRegInfo v81_pmu_regs[] = {
+ { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMCEIDN_EL0,
+ .resetvalue = extract64(cpu->pmceid0, 32, 32) },
+ { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMCEIDN_EL0,
+ .resetvalue = extract64(cpu->pmceid1, 32, 32) },
+ };
+ define_arm_cp_regs(cpu, v81_pmu_regs);
+ }
+
+ if (cpu_isar_feature(any_pmuv3p4, cpu)) {
+ static const ARMCPRegInfo v84_pmmir = {
+ .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
+ .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .fgt = FGT_PMMIR_EL1,
+ .resetvalue = 0
+ };
+ define_one_arm_cp_reg(cpu, &v84_pmmir);
+ }
+}
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index 2183de8..c9506aa 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -23,6 +23,7 @@
#include "hw/registerfields.h"
#include "target/arm/kvm-consts.h"
+#include "cpu.h"
/*
* ARMCPRegInfo type field bits:
@@ -1064,6 +1065,9 @@ void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
/* CPReadFn that can be used for read-as-zero behaviour */
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);
+/* CPReadFn that just reads the value from ri->fieldoffset */
+uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri);
+
/* CPWriteFn that just writes the value to ri->fieldoffset */
void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value);
diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h
index 525e4ce..5876162 100644
--- a/target/arm/cpu-features.h
+++ b/target/arm/cpu-features.h
@@ -22,6 +22,8 @@
#include "hw/registerfields.h"
#include "qemu/host-utils.h"
+#include "cpu.h"
+#include "cpu-sysregs.h"
/*
* Naming convention for isar_feature functions:
@@ -44,103 +46,103 @@
*/
static inline bool isar_feature_aa32_thumb_div(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR0, DIVIDE) != 0;
}
static inline bool isar_feature_aa32_arm_div(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1;
+ return FIELD_EX32_IDREG(id, ID_ISAR0, DIVIDE) > 1;
}
static inline bool isar_feature_aa32_lob(const ARMISARegisters *id)
{
/* (M-profile) low-overhead loops and branch future */
- return FIELD_EX32(id->id_isar0, ID_ISAR0, CMPBRANCH) >= 3;
+ return FIELD_EX32_IDREG(id, ID_ISAR0, CMPBRANCH) >= 3;
}
static inline bool isar_feature_aa32_jazelle(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR1, JAZELLE) != 0;
}
static inline bool isar_feature_aa32_aes(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, AES) != 0;
}
static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, AES) > 1;
}
static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, SHA1) != 0;
}
static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, SHA2) != 0;
}
static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, CRC32) != 0;
}
static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, RDM) != 0;
}
static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR5, VCMA) != 0;
}
static inline bool isar_feature_aa32_jscvt(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, JSCVT) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, JSCVT) != 0;
}
static inline bool isar_feature_aa32_dp(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, DP) != 0;
}
static inline bool isar_feature_aa32_fhm(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, FHM) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, FHM) != 0;
}
static inline bool isar_feature_aa32_sb(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, SB) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, SB) != 0;
}
static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, SPECRES) != 0;
}
static inline bool isar_feature_aa32_bf16(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, BF16) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, BF16) != 0;
}
static inline bool isar_feature_aa32_i8mm(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_isar6, ID_ISAR6, I8MM) != 0;
+ return FIELD_EX32_IDREG(id, ID_ISAR6, I8MM) != 0;
}
static inline bool isar_feature_aa32_ras(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_pfr0, ID_PFR0, RAS) != 0;
+ return FIELD_EX32_IDREG(id, ID_PFR0, RAS) != 0;
}
static inline bool isar_feature_aa32_mprofile(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_pfr1, ID_PFR1, MPROGMOD) != 0;
+ return FIELD_EX32_IDREG(id, ID_PFR1, MPROGMOD) != 0;
}
static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id)
@@ -149,7 +151,7 @@ static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id)
* Return true if M-profile state handling insns
* (VSCCLRM, CLRM, FPCTX access insns) are implemented
*/
- return FIELD_EX32(id->id_pfr1, ID_PFR1, SECURITY) >= 3;
+ return FIELD_EX32_IDREG(id, ID_PFR1, SECURITY) >= 3;
}
static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id)
@@ -282,88 +284,88 @@ static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id)
static inline bool isar_feature_aa32_pxn(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr0, ID_MMFR0, VMSA) >= 4;
+ return FIELD_EX32_IDREG(id, ID_MMFR0, VMSA) >= 4;
}
static inline bool isar_feature_aa32_pan(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) != 0;
+ return FIELD_EX32_IDREG(id, ID_MMFR3, PAN) != 0;
}
static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2;
+ return FIELD_EX32_IDREG(id, ID_MMFR3, PAN) >= 2;
}
static inline bool isar_feature_aa32_pmuv3p1(const ARMISARegisters *id)
{
/* 0xf means "non-standard IMPDEF PMU" */
- return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
- FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
+ return FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) >= 4 &&
+ FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) != 0xf;
}
static inline bool isar_feature_aa32_pmuv3p4(const ARMISARegisters *id)
{
/* 0xf means "non-standard IMPDEF PMU" */
- return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 &&
- FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
+ return FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) >= 5 &&
+ FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) != 0xf;
}
static inline bool isar_feature_aa32_pmuv3p5(const ARMISARegisters *id)
{
/* 0xf means "non-standard IMPDEF PMU" */
- return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 6 &&
- FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
+ return FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) >= 6 &&
+ FIELD_EX32_IDREG(id, ID_DFR0, PERFMON) != 0xf;
}
static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0;
+ return FIELD_EX32_IDREG(id, ID_MMFR4, HPDS) != 0;
}
static inline bool isar_feature_aa32_ac2(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr4, ID_MMFR4, AC2) != 0;
+ return FIELD_EX32_IDREG(id, ID_MMFR4, AC2) != 0;
}
static inline bool isar_feature_aa32_ccidx(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr4, ID_MMFR4, CCIDX) != 0;
+ return FIELD_EX32_IDREG(id, ID_MMFR4, CCIDX) != 0;
}
static inline bool isar_feature_aa32_tts2uxn(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr4, ID_MMFR4, XNX) != 0;
+ return FIELD_EX32_IDREG(id, ID_MMFR4, XNX) != 0;
}
static inline bool isar_feature_aa32_half_evt(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr4, ID_MMFR4, EVT) >= 1;
+ return FIELD_EX32_IDREG(id, ID_MMFR4, EVT) >= 1;
}
static inline bool isar_feature_aa32_evt(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_mmfr4, ID_MMFR4, EVT) >= 2;
+ return FIELD_EX32_IDREG(id, ID_MMFR4, EVT) >= 2;
}
static inline bool isar_feature_aa32_dit(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_pfr0, ID_PFR0, DIT) != 0;
+ return FIELD_EX32_IDREG(id, ID_PFR0, DIT) != 0;
}
static inline bool isar_feature_aa32_ssbs(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_pfr2, ID_PFR2, SSBS) != 0;
+ return FIELD_EX32_IDREG(id, ID_PFR2, SSBS) != 0;
}
static inline bool isar_feature_aa32_debugv7p1(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 5;
+ return FIELD_EX32_IDREG(id, ID_DFR0, COPDBG) >= 5;
}
static inline bool isar_feature_aa32_debugv8p2(const ARMISARegisters *id)
{
- return FIELD_EX32(id->id_dfr0, ID_DFR0, COPDBG) >= 8;
+ return FIELD_EX32_IDREG(id, ID_DFR0, COPDBG) >= 8;
}
static inline bool isar_feature_aa32_doublelock(const ARMISARegisters *id)
@@ -376,107 +378,107 @@ static inline bool isar_feature_aa32_doublelock(const ARMISARegisters *id)
*/
static inline bool isar_feature_aa64_aes(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, AES) != 0;
}
static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, AES) > 1;
}
static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SHA1) != 0;
}
static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SHA2) != 0;
}
static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SHA2) > 1;
}
static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, CRC32) != 0;
}
static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, ATOMIC) != 0;
}
static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, RDM) != 0;
}
static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SHA3) != 0;
}
static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SM3) != 0;
}
static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, SM4) != 0;
}
static inline bool isar_feature_aa64_dp(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, DP) != 0;
}
static inline bool isar_feature_aa64_fhm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, FHM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, FHM) != 0;
}
static inline bool isar_feature_aa64_condm_4(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, TS) != 0;
}
static inline bool isar_feature_aa64_condm_5(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, TS) >= 2;
}
static inline bool isar_feature_aa64_rndr(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RNDR) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, RNDR) != 0;
}
static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, TLB) == 2;
}
static inline bool isar_feature_aa64_tlbios(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR0, TLB) != 0;
}
static inline bool isar_feature_aa64_jscvt(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, JSCVT) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, JSCVT) != 0;
}
static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, FCMA) != 0;
}
static inline bool isar_feature_aa64_xs(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, XS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, XS) != 0;
}
/*
@@ -500,9 +502,9 @@ isar_feature_pauth_feature(const ARMISARegisters *id)
* Architecturally, only one of {APA,API,APA3} may be active (non-zero)
* and the other two must be zero. Thus we may avoid conditionals.
*/
- return (FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) |
- FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, API) |
- FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3));
+ return (FIELD_EX64_IDREG(id, ID_AA64ISAR1, APA) |
+ FIELD_EX64_IDREG(id, ID_AA64ISAR1, API) |
+ FIELD_EX64_IDREG(id, ID_AA64ISAR2, APA3));
}
static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id)
@@ -520,7 +522,7 @@ static inline bool isar_feature_aa64_pauth_qarma5(const ARMISARegisters *id)
* Return true if pauth is enabled with the architected QARMA5 algorithm.
* QEMU will always enable or disable both APA and GPA.
*/
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, APA) != 0;
}
static inline bool isar_feature_aa64_pauth_qarma3(const ARMISARegisters *id)
@@ -529,144 +531,149 @@ static inline bool isar_feature_aa64_pauth_qarma3(const ARMISARegisters *id)
* Return true if pauth is enabled with the architected QARMA3 algorithm.
* QEMU will always enable or disable both APA3 and GPA3.
*/
- return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, APA3) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR2, APA3) != 0;
}
static inline bool isar_feature_aa64_sb(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, SB) != 0;
}
static inline bool isar_feature_aa64_predinv(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SPECRES) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, SPECRES) != 0;
}
static inline bool isar_feature_aa64_frint(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, FRINTTS) != 0;
}
static inline bool isar_feature_aa64_dcpop(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, DPB) != 0;
}
static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, DPB) >= 2;
}
static inline bool isar_feature_aa64_bf16(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, BF16) != 0;
}
static inline bool isar_feature_aa64_ebf16(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) > 1;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, BF16) > 1;
}
static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, LRCPC) != 0;
}
static inline bool isar_feature_aa64_rcpc_8_4(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, LRCPC) >= 2;
}
static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR1, I8MM) != 0;
}
static inline bool isar_feature_aa64_wfxt(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, WFXT) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR2, WFXT) >= 2;
}
static inline bool isar_feature_aa64_hbc(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, BC) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR2, BC) != 0;
}
static inline bool isar_feature_aa64_mops(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, MOPS);
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR2, MOPS);
}
static inline bool isar_feature_aa64_rpres(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64isar2, ID_AA64ISAR2, RPRES);
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR2, RPRES);
+}
+
+static inline bool isar_feature_aa64_lut(const ARMISARegisters *id)
+{
+ return FIELD_EX64_IDREG(id, ID_AA64ISAR2, LUT);
}
static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id)
{
/* We always set the AdvSIMD and FP fields identically. */
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) != 0xf;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, FP) != 0xf;
}
static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id)
{
/* We always set the AdvSIMD and FP fields identically wrt FP16. */
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, FP) == 1;
}
static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, EL0) >= 2;
}
static inline bool isar_feature_aa64_aa32_el1(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL1) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, EL1) >= 2;
}
static inline bool isar_feature_aa64_aa32_el2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL2) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, EL2) >= 2;
}
static inline bool isar_feature_aa64_ras(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RAS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, RAS) != 0;
}
static inline bool isar_feature_aa64_doublefault(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RAS) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, RAS) >= 2;
}
static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, SVE) != 0;
}
static inline bool isar_feature_aa64_sel2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SEL2) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, SEL2) != 0;
}
static inline bool isar_feature_aa64_rme(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, RME) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, RME) != 0;
}
static inline bool isar_feature_aa64_dit(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, DIT) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR0, DIT) != 0;
}
static inline bool isar_feature_aa64_scxtnum(const ARMISARegisters *id)
{
- int key = FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, CSV2);
+ int key = FIELD_EX64_IDREG(id, ID_AA64PFR0, CSV2);
if (key >= 2) {
return true; /* FEAT_CSV2_2 */
}
if (key == 1) {
- key = FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, CSV2_FRAC);
+ key = FIELD_EX64_IDREG(id, ID_AA64PFR1, CSV2_FRAC);
return key >= 2; /* FEAT_CSV2_1p2 */
}
return false;
@@ -674,320 +681,378 @@ static inline bool isar_feature_aa64_scxtnum(const ARMISARegisters *id)
static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, SSBS) != 0;
}
static inline bool isar_feature_aa64_bti(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, BT) != 0;
}
static inline bool isar_feature_aa64_mte_insn_reg(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, MTE) != 0;
}
static inline bool isar_feature_aa64_mte(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, MTE) >= 2;
}
static inline bool isar_feature_aa64_mte3(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) >= 3;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, MTE) >= 3;
}
static inline bool isar_feature_aa64_sme(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SME) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, SME) != 0;
}
static inline bool isar_feature_aa64_nmi(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, NMI) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64PFR1, NMI) != 0;
}
static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id)
{
- return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 1;
+ return FIELD_SEX64_IDREG(id, ID_AA64MMFR0, TGRAN4) >= 1;
}
static inline bool isar_feature_aa64_tgran4_2_lpa2(const ARMISARegisters *id)
{
- unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2);
+ unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN4_2);
return t >= 3 || (t == 0 && isar_feature_aa64_tgran4_lpa2(id));
}
static inline bool isar_feature_aa64_tgran16_lpa2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN16) >= 2;
}
static inline bool isar_feature_aa64_tgran16_2_lpa2(const ARMISARegisters *id)
{
- unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2);
+ unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN16_2);
return t >= 3 || (t == 0 && isar_feature_aa64_tgran16_lpa2(id));
}
static inline bool isar_feature_aa64_tgran4(const ARMISARegisters *id)
{
- return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 0;
+ return FIELD_SEX64_IDREG(id, ID_AA64MMFR0, TGRAN4) >= 0;
}
static inline bool isar_feature_aa64_tgran16(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 1;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN16) >= 1;
}
static inline bool isar_feature_aa64_tgran64(const ARMISARegisters *id)
{
- return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64) >= 0;
+ return FIELD_SEX64_IDREG(id, ID_AA64MMFR0, TGRAN64) >= 0;
}
static inline bool isar_feature_aa64_tgran4_2(const ARMISARegisters *id)
{
- unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2);
+ unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN4_2);
return t >= 2 || (t == 0 && isar_feature_aa64_tgran4(id));
}
static inline bool isar_feature_aa64_tgran16_2(const ARMISARegisters *id)
{
- unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2);
+ unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN16_2);
return t >= 2 || (t == 0 && isar_feature_aa64_tgran16(id));
}
static inline bool isar_feature_aa64_tgran64_2(const ARMISARegisters *id)
{
- unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64_2);
+ unsigned t = FIELD_EX64_IDREG(id, ID_AA64MMFR0, TGRAN64_2);
return t >= 2 || (t == 0 && isar_feature_aa64_tgran64(id));
}
static inline bool isar_feature_aa64_fgt(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, FGT) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR0, FGT) != 0;
}
static inline bool isar_feature_aa64_ecv_traps(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, ECV) > 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR0, ECV) > 0;
}
static inline bool isar_feature_aa64_ecv(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, ECV) > 1;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR0, ECV) > 1;
}
static inline bool isar_feature_aa64_vh(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, VH) != 0;
}
static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, LO) != 0;
}
static inline bool isar_feature_aa64_pan(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, PAN) != 0;
}
static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, PAN) >= 2;
}
static inline bool isar_feature_aa64_pan3(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 3;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, PAN) >= 3;
}
static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, HCX) != 0;
}
static inline bool isar_feature_aa64_afp(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, AFP) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, AFP) != 0;
}
static inline bool isar_feature_aa64_tidcp1(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, TIDCP1) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, TIDCP1) != 0;
}
static inline bool isar_feature_aa64_cmow(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, CMOW) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, CMOW) != 0;
}
static inline bool isar_feature_aa64_hafs(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, HAFDBS) != 0;
}
static inline bool isar_feature_aa64_hdbs(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HAFDBS) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, HAFDBS) >= 2;
}
static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR1, XNX) != 0;
}
static inline bool isar_feature_aa64_uao(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, UAO) != 0;
}
static inline bool isar_feature_aa64_st(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, ST) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, ST) != 0;
}
static inline bool isar_feature_aa64_lse2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, AT) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, AT) != 0;
}
static inline bool isar_feature_aa64_fwb(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, FWB) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, FWB) != 0;
}
static inline bool isar_feature_aa64_ids(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, IDS) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, IDS) != 0;
}
static inline bool isar_feature_aa64_half_evt(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, EVT) >= 1;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, EVT) >= 1;
}
static inline bool isar_feature_aa64_evt(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, EVT) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, EVT) >= 2;
}
static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, CCIDX) != 0;
}
static inline bool isar_feature_aa64_lva(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, VARANGE) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, VARANGE) != 0;
}
static inline bool isar_feature_aa64_e0pd(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, E0PD) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, E0PD) != 0;
}
static inline bool isar_feature_aa64_nv(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, NV) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, NV) != 0;
}
static inline bool isar_feature_aa64_nv2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, NV) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64MMFR2, NV) >= 2;
}
static inline bool isar_feature_aa64_pmuv3p1(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 &&
- FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
+ return FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) >= 4 &&
+ FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) != 0xf;
}
static inline bool isar_feature_aa64_pmuv3p4(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 &&
- FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
+ return FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) >= 5 &&
+ FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) != 0xf;
}
static inline bool isar_feature_aa64_pmuv3p5(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 6 &&
- FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
+ return FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) >= 6 &&
+ FIELD_EX64_IDREG(id, ID_AA64DFR0, PMUVER) != 0xf;
}
static inline bool isar_feature_aa64_debugv8p2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, DEBUGVER) >= 8;
+ return FIELD_EX64_IDREG(id, ID_AA64DFR0, DEBUGVER) >= 8;
}
static inline bool isar_feature_aa64_doublelock(const ARMISARegisters *id)
{
- return FIELD_SEX64(id->id_aa64dfr0, ID_AA64DFR0, DOUBLELOCK) >= 0;
+ return FIELD_SEX64_IDREG(id, ID_AA64DFR0, DOUBLELOCK) >= 0;
}
static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, SVEVER) != 0;
+}
+
+static inline bool isar_feature_aa64_sve2p1(const ARMISARegisters *id)
+{
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, SVEVER) >= 2;
}
static inline bool isar_feature_aa64_sve2_aes(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, AES) != 0;
}
static inline bool isar_feature_aa64_sve2_pmull128(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) >= 2;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, AES) >= 2;
}
static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, BITPERM) != 0;
}
static inline bool isar_feature_aa64_sve_bf16(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BFLOAT16) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, BFLOAT16) != 0;
}
static inline bool isar_feature_aa64_sve2_sha3(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SHA3) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, SHA3) != 0;
}
static inline bool isar_feature_aa64_sve2_sm4(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SM4) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, SM4) != 0;
}
static inline bool isar_feature_aa64_sve_i8mm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, I8MM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, I8MM) != 0;
}
static inline bool isar_feature_aa64_sve_f32mm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, F32MM) != 0;
}
static inline bool isar_feature_aa64_sve_f64mm(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F64MM) != 0;
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, F64MM) != 0;
+}
+
+static inline bool isar_feature_aa64_sve_b16b16(const ARMISARegisters *id)
+{
+ return FIELD_EX64_IDREG(id, ID_AA64ZFR0, B16B16);
+}
+
+static inline bool isar_feature_aa64_sme_b16b16(const ARMISARegisters *id)
+{
+ return FIELD_EX64_IDREG(id, ID_AA64SMFR0, B16B16);
+}
+
+static inline bool isar_feature_aa64_sme_f16f16(const ARMISARegisters *id)
+{
+ return FIELD_EX64_IDREG(id, ID_AA64SMFR0, F16F16);
}
static inline bool isar_feature_aa64_sme_f64f64(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, F64F64);
+ return FIELD_EX64_IDREG(id, ID_AA64SMFR0, F64F64);
}
static inline bool isar_feature_aa64_sme_i16i64(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, I16I64) == 0xf;
+ return FIELD_EX64_IDREG(id, ID_AA64SMFR0, I16I64) == 0xf;
}
static inline bool isar_feature_aa64_sme_fa64(const ARMISARegisters *id)
{
- return FIELD_EX64(id->id_aa64smfr0, ID_AA64SMFR0, FA64);
+ return FIELD_EX64_IDREG(id, ID_AA64SMFR0, FA64);
+}
+
+static inline bool isar_feature_aa64_sme2(const ARMISARegisters *id)
+{
+ return FIELD_EX64_IDREG(id, ID_AA64SMFR0, SMEVER) != 0;
+}
+
+static inline bool isar_feature_aa64_sme2p1(const ARMISARegisters *id)
+{
+ return FIELD_EX64_IDREG(id, ID_AA64SMFR0, SMEVER) >= 2;
+}
+
+/*
+ * Combinations of feature tests, for ease of use with TRANS_FEAT.
+ */
+static inline bool isar_feature_aa64_sme_or_sve2p1(const ARMISARegisters *id)
+{
+ return isar_feature_aa64_sme(id) || isar_feature_aa64_sve2p1(id);
+}
+
+static inline bool isar_feature_aa64_sme2_or_sve2p1(const ARMISARegisters *id)
+{
+ return isar_feature_aa64_sme2(id) || isar_feature_aa64_sve2p1(id);
+}
+
+static inline bool isar_feature_aa64_sme2p1_or_sve2p1(const ARMISARegisters *id)
+{
+ return isar_feature_aa64_sme2p1(id) || isar_feature_aa64_sve2p1(id);
+}
+
+static inline bool isar_feature_aa64_sme2_i16i64(const ARMISARegisters *id)
+{
+ return isar_feature_aa64_sme2(id) && isar_feature_aa64_sme_i16i64(id);
+}
+
+static inline bool isar_feature_aa64_sme2_f64f64(const ARMISARegisters *id)
+{
+ return isar_feature_aa64_sme2(id) && isar_feature_aa64_sme_f64f64(id);
}
/*
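The three *_or_* helpers just added exist so that a single predicate can gate a decode line on either feature. A minimal sketch of how such a predicate is consumed, assuming the TRANS_FEAT/dc_isar_feature pattern from target/arm/tcg/translate.h (the instruction and helper names here are hypothetical, not insns from this patch):

    /*
     * TRANS_FEAT expands to a trans_EXAMPLE_OP() decode stub that tests
     * isar_feature_aa64_sme2_or_sve2p1() via dc_isar_feature() before
     * calling the real translator; EXAMPLE_OP and trans_example_op are
     * placeholders.
     */
    TRANS_FEAT(EXAMPLE_OP, aa64_sme2_or_sve2p1, trans_example_op, a)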
diff --git a/target/arm/cpu-param.h b/target/arm/cpu-param.h
index 896b35b..8b46c7c 100644
--- a/target/arm/cpu-param.h
+++ b/target/arm/cpu-param.h
@@ -17,15 +17,9 @@
#endif
#ifdef CONFIG_USER_ONLY
-# ifdef TARGET_AARCH64
-# define TARGET_TAGGED_ADDRESSES
-# ifdef __FreeBSD__
-# define TARGET_PAGE_BITS 12
-# else
+# if defined(TARGET_AARCH64) && defined(CONFIG_LINUX)
/* Allow user-only to vary page size from 4k */
# define TARGET_PAGE_BITS_VARY
-# define TARGET_PAGE_BITS_MIN 12
-# endif
# else
# define TARGET_PAGE_BITS 12
# endif
@@ -35,10 +29,14 @@
* have to support 1K tiny pages.
*/
# define TARGET_PAGE_BITS_VARY
-# define TARGET_PAGE_BITS_MIN 10
+# define TARGET_PAGE_BITS_LEGACY 10
#endif /* !CONFIG_USER_ONLY */
-/* ARM processors have a weak memory model */
-#define TCG_GUEST_DEFAULT_MO (0)
+/*
+ * ARM-specific extra insn start words:
+ * 1: Conditional execution bits
+ * 2: Partial exception syndrome for data aborts
+ */
+#define TARGET_INSN_START_EXTRA_WORDS 2
#endif
diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h
index b497667..2fcb0e1 100644
--- a/target/arm/cpu-qom.h
+++ b/target/arm/cpu-qom.h
@@ -28,11 +28,6 @@ OBJECT_DECLARE_CPU_TYPE(ARMCPU, ARMCPUClass, ARM_CPU)
#define TYPE_ARM_MAX_CPU "max-" TYPE_ARM_CPU
-#define TYPE_AARCH64_CPU "aarch64-cpu"
-typedef struct AArch64CPUClass AArch64CPUClass;
-DECLARE_CLASS_CHECKERS(AArch64CPUClass, AARCH64_CPU,
- TYPE_AARCH64_CPU)
-
#define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)
diff --git a/target/arm/cpu-sysregs.h b/target/arm/cpu-sysregs.h
new file mode 100644
index 0000000..7877a3b
--- /dev/null
+++ b/target/arm/cpu-sysregs.h
@@ -0,0 +1,42 @@
+/*
+ * Definitions for Arm ID system registers
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef ARM_CPU_SYSREGS_H
+#define ARM_CPU_SYSREGS_H
+
+/*
+ * The following is similar to the coprocessor register encodings, but with an
+ * argument ordering that matches the ARM ARM. We also reuse the various
+ * CP_REG_ defines, which are in fact the same as the equivalent KVM_REG_
+ * values.
+ */
+#define ENCODE_ID_REG(op0, op1, crn, crm, op2) \
+ (((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \
+ ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \
+ ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \
+ ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \
+ ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))
+
+#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) NAME##_IDX,
+
+typedef enum ARMIDRegisterIdx {
+#include "cpu-sysregs.h.inc"
+ NUM_ID_IDX,
+} ARMIDRegisterIdx;
+
+#undef DEF
+#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \
+ SYS_##NAME = ENCODE_ID_REG(OP0, OP1, CRN, CRM, OP2),
+
+typedef enum ARMSysRegs {
+#include "cpu-sysregs.h.inc"
+} ARMSysRegs;
+
+#undef DEF
+
+extern const uint32_t id_register_sysreg[NUM_ID_IDX];
+
+int get_sysreg_idx(ARMSysRegs sysreg);
+
+#endif /* ARM_CPU_SYSREGS_H */
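As a worked example of ENCODE_ID_REG(): with the usual Linux/KVM shift values (OP0=14, OP1=11, CRN=7, CRM=3, OP2=0 — an assumption here, taken from the KVM_REG_ARM64_SYSREG_* definitions that the CP_REG_ARM64_SYSREG_* defines mirror), ID_AA64PFR0_EL1 at op0=3, op1=0, crn=0, crm=4, op2=0 comes out as 0xc020. A self-contained check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Shift values assumed from Linux's KVM_REG_ARM64_SYSREG_* defines. */
        uint32_t enc = (3u << 14) | (0u << 11) | (0u << 7) | (4u << 3) | (0u << 0);
        assert(enc == 0xc020); /* SYS_ID_AA64PFR0_EL1 */
        return 0;
    }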
diff --git a/target/arm/cpu-sysregs.h.inc b/target/arm/cpu-sysregs.h.inc
new file mode 100644
index 0000000..f48a9da
--- /dev/null
+++ b/target/arm/cpu-sysregs.h.inc
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+DEF(ID_AA64PFR0_EL1, 3, 0, 0, 4, 0)
+DEF(ID_AA64PFR1_EL1, 3, 0, 0, 4, 1)
+DEF(ID_AA64SMFR0_EL1, 3, 0, 0, 4, 5)
+DEF(ID_AA64DFR0_EL1, 3, 0, 0, 5, 0)
+DEF(ID_AA64DFR1_EL1, 3, 0, 0, 5, 1)
+DEF(ID_AA64AFR0_EL1, 3, 0, 0, 5, 4)
+DEF(ID_AA64AFR1_EL1, 3, 0, 0, 5, 5)
+DEF(ID_AA64ISAR0_EL1, 3, 0, 0, 6, 0)
+DEF(ID_AA64ISAR1_EL1, 3, 0, 0, 6, 1)
+DEF(ID_AA64ISAR2_EL1, 3, 0, 0, 6, 2)
+DEF(ID_AA64MMFR0_EL1, 3, 0, 0, 7, 0)
+DEF(ID_AA64MMFR1_EL1, 3, 0, 0, 7, 1)
+DEF(ID_AA64MMFR2_EL1, 3, 0, 0, 7, 2)
+DEF(ID_AA64MMFR3_EL1, 3, 0, 0, 7, 3)
+DEF(ID_PFR0_EL1, 3, 0, 0, 1, 0)
+DEF(ID_PFR1_EL1, 3, 0, 0, 1, 1)
+DEF(ID_DFR0_EL1, 3, 0, 0, 1, 2)
+DEF(ID_AFR0_EL1, 3, 0, 0, 1, 3)
+DEF(ID_MMFR0_EL1, 3, 0, 0, 1, 4)
+DEF(ID_MMFR1_EL1, 3, 0, 0, 1, 5)
+DEF(ID_MMFR2_EL1, 3, 0, 0, 1, 6)
+DEF(ID_MMFR3_EL1, 3, 0, 0, 1, 7)
+DEF(ID_ISAR0_EL1, 3, 0, 0, 2, 0)
+DEF(ID_ISAR1_EL1, 3, 0, 0, 2, 1)
+DEF(ID_ISAR2_EL1, 3, 0, 0, 2, 2)
+DEF(ID_ISAR3_EL1, 3, 0, 0, 2, 3)
+DEF(ID_ISAR4_EL1, 3, 0, 0, 2, 4)
+DEF(ID_ISAR5_EL1, 3, 0, 0, 2, 5)
+DEF(ID_MMFR4_EL1, 3, 0, 0, 2, 6)
+DEF(ID_ISAR6_EL1, 3, 0, 0, 2, 7)
+DEF(MVFR0_EL1, 3, 0, 0, 3, 0)
+DEF(MVFR1_EL1, 3, 0, 0, 3, 1)
+DEF(MVFR2_EL1, 3, 0, 0, 3, 2)
+DEF(ID_PFR2_EL1, 3, 0, 0, 3, 4)
+DEF(ID_DFR1_EL1, 3, 0, 0, 3, 5)
+DEF(ID_MMFR5_EL1, 3, 0, 0, 3, 6)
+DEF(CLIDR_EL1, 3, 1, 0, 0, 1)
+DEF(ID_AA64ZFR0_EL1, 3, 0, 0, 4, 4)
+DEF(CTR_EL0, 3, 3, 0, 0, 1)
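This new header is an X-macro table: cpu-sysregs.h includes it twice with different DEF() definitions, so each line expands into both an array index and an encoded sysreg value. For the first entry, the preprocessor output is roughly:

    /* First inclusion, DEF(NAME, ...) => NAME##_IDX: */
    typedef enum ARMIDRegisterIdx {
        ID_AA64PFR0_EL1_IDX,
        /* ...one enumerator per DEF() line... */
        NUM_ID_IDX,
    } ARMIDRegisterIdx;

    /* Second inclusion, DEF(NAME, op0, op1, crn, crm, op2) => SYS_##NAME: */
    typedef enum ARMSysRegs {
        SYS_ID_AA64PFR0_EL1 = ENCODE_ID_REG(3, 0, 0, 4, 0),
        /* ... */
    } ARMSysRegs;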
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 01786ac..08c43f6 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -23,6 +23,7 @@
#include "qemu/timer.h"
#include "qemu/log.h"
#include "exec/page-vary.h"
+#include "exec/tswap.h"
#include "target/arm/idau.h"
#include "qemu/module.h"
#include "qapi/error.h"
@@ -33,7 +34,7 @@
#endif /* CONFIG_TCG */
#include "internals.h"
#include "cpu-features.h"
-#include "exec/exec-all.h"
+#include "exec/target_page.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
@@ -121,6 +122,12 @@ void arm_restore_state_to_opc(CPUState *cs,
env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
}
}
+
+int arm_cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+ return arm_env_mmu_index(cpu_env(cs));
+}
+
#endif /* CONFIG_TCG */
#ifndef CONFIG_USER_ONLY
@@ -144,11 +151,6 @@ static bool arm_cpu_has_work(CPUState *cs)
}
#endif /* !CONFIG_USER_ONLY */
-static int arm_cpu_mmu_index(CPUState *cs, bool ifetch)
-{
- return arm_env_mmu_index(cpu_env(cs));
-}
-
void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
void *opaque)
{
@@ -552,11 +554,15 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type)
set_flush_inputs_to_zero(1, &env->vfp.fp_status[FPST_STD]);
set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD]);
set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD_F16]);
+ set_default_nan_mode(1, &env->vfp.fp_status[FPST_ZA]);
+ set_default_nan_mode(1, &env->vfp.fp_status[FPST_ZA_F16]);
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A32]);
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64]);
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_ZA]);
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD]);
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A32_F16]);
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]);
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_ZA_F16]);
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD_F16]);
arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_AH]);
set_flush_to_zero(1, &env->vfp.fp_status[FPST_AH]);
@@ -629,6 +635,9 @@ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
env->cp15.scr_el3 |= SCR_ENTP2;
env->vfp.smcr_el[3] = 0xf;
+ if (cpu_isar_feature(aa64_sme2, cpu)) {
+ env->vfp.smcr_el[3] |= R_SMCR_EZT0_MASK;
+ }
}
if (cpu_isar_feature(aa64_hcx, cpu)) {
env->cp15.scr_el3 |= SCR_HXEN;
@@ -1097,37 +1106,6 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
}
}
-static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
-{
-#ifdef CONFIG_KVM
- ARMCPU *cpu = opaque;
- CPUARMState *env = &cpu->env;
- CPUState *cs = CPU(cpu);
- uint32_t linestate_bit;
- int irq_id;
-
- switch (irq) {
- case ARM_CPU_IRQ:
- irq_id = KVM_ARM_IRQ_CPU_IRQ;
- linestate_bit = CPU_INTERRUPT_HARD;
- break;
- case ARM_CPU_FIQ:
- irq_id = KVM_ARM_IRQ_CPU_FIQ;
- linestate_bit = CPU_INTERRUPT_FIQ;
- break;
- default:
- g_assert_not_reached();
- }
-
- if (level) {
- env->irq_line_state |= linestate_bit;
- } else {
- env->irq_line_state &= ~linestate_bit;
- }
- kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
-#endif
-}
-
static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -1201,7 +1179,7 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
info->endian = BFD_ENDIAN_LITTLE;
if (bswap_code(sctlr_b)) {
- info->endian = TARGET_BIG_ENDIAN ? BFD_ENDIAN_LITTLE : BFD_ENDIAN_BIG;
+ info->endian = target_big_endian() ? BFD_ENDIAN_LITTLE : BFD_ENDIAN_BIG;
}
info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
@@ -1211,8 +1189,6 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
#endif
}
-#ifdef TARGET_AARCH64
-
static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -1362,23 +1338,14 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
qemu_fprintf(f, "ZA[%0*d]=", svl_lg10, i);
for (j = zcr_len; j >= 0; --j) {
qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%c",
- env->zarray[i].d[2 * j + 1],
- env->zarray[i].d[2 * j],
+ env->za_state.za[i].d[2 * j + 1],
+ env->za_state.za[i].d[2 * j],
j ? ':' : '\n');
}
}
}
}
-#else
-
-static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
- g_assert_not_reached();
-}
-
-#endif
-
static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -1540,6 +1507,7 @@ static void arm_cpu_initfn(Object *obj)
* 0 means "unset, use the default value". That default might vary depending
* on the CPU type, and is set in the realize fn.
*/
+#ifndef CONFIG_USER_ONLY
static const Property arm_cpu_gt_cntfrq_property =
DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz, 0);
@@ -1549,7 +1517,6 @@ static const Property arm_cpu_reset_cbar_property =
static const Property arm_cpu_reset_hivecs_property =
DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
-#ifndef CONFIG_USER_ONLY
static const Property arm_cpu_has_el2_property =
DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
@@ -1572,6 +1539,7 @@ static const Property arm_cpu_has_neon_property =
static const Property arm_cpu_has_dsp_property =
DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);
+#ifndef CONFIG_USER_ONLY
static const Property arm_cpu_has_mpu_property =
DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
@@ -1584,6 +1552,7 @@ static const Property arm_cpu_pmsav7_dregion_property =
DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
pmsav7_dregion,
qdev_prop_uint32, uint32_t);
+#endif
static bool arm_get_pmu(Object *obj, Error **errp)
{
@@ -1608,6 +1577,35 @@ static void arm_set_pmu(Object *obj, bool value, Error **errp)
cpu->has_pmu = value;
}
+static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
+}
+
+static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ /*
+ * At this time, this property is only allowed if KVM is enabled. This
+ * restriction allows us to avoid fixing up functionality that assumes a
+ * uniform execution state like do_interrupt.
+ */
+ if (value == false) {
+ if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
+ error_setg(errp, "'aarch64' feature cannot be disabled "
+ "unless KVM is enabled and 32-bit EL1 "
+ "is supported");
+ return;
+ }
+ unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ } else {
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ }
+}
+
unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
{
/*
@@ -1724,7 +1722,7 @@ static void arm_cpu_propagate_feature_implications(ARMCPU *cpu)
}
}
-void arm_cpu_post_init(Object *obj)
+static void arm_cpu_post_init(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
@@ -1735,6 +1733,14 @@ void arm_cpu_post_init(Object *obj)
*/
arm_cpu_propagate_feature_implications(cpu);
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ object_property_add_bool(obj, "aarch64", aarch64_cpu_get_aarch64,
+ aarch64_cpu_set_aarch64);
+ object_property_set_description(obj, "aarch64",
+ "Set on/off to enable/disable aarch64 "
+ "execution state ");
+ }
+#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
@@ -1750,7 +1756,6 @@ void arm_cpu_post_init(Object *obj)
OBJ_PROP_FLAG_READWRITE);
}
-#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
/* Add the has_el3 state CPU property only if EL3 is allowed. This will
* prevent "has_el3" from existing on CPUs which cannot support EL3.
@@ -1822,6 +1827,7 @@ void arm_cpu_post_init(Object *obj)
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property);
}
+#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property);
if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
@@ -1858,8 +1864,6 @@ void arm_cpu_post_init(Object *obj)
&cpu->psci_conduit,
OBJ_PROP_FLAG_READWRITE);
- qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
-
if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
}
@@ -1868,7 +1872,6 @@ void arm_cpu_post_init(Object *obj)
kvm_arm_add_vcpu_properties(cpu);
}
-#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) &&
cpu_isar_feature(aa64_mte, cpu)) {
object_property_add_link(obj, "tag-memory",
@@ -1886,6 +1889,7 @@ void arm_cpu_post_init(Object *obj)
}
}
#endif
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
}
static void arm_cpu_finalizefn(Object *obj)
@@ -1917,7 +1921,6 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
{
Error *local_err = NULL;
-#ifdef TARGET_AARCH64
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
arm_cpu_sve_finalize(cpu, &local_err);
if (local_err != NULL) {
@@ -1953,7 +1956,6 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
return;
}
}
-#endif
if (kvm_enabled()) {
kvm_arm_steal_time_finalize(cpu, &local_err);
@@ -1968,6 +1970,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
ARMCPU *cpu = ARM_CPU(dev);
+ ARMISARegisters *isar = &cpu->isar;
ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
CPUARMState *env = &cpu->env;
Error *local_err = NULL;
@@ -2125,21 +2128,16 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (!cpu->has_vfp) {
- uint64_t t;
uint32_t u;
- t = cpu->isar.id_aa64isar1;
- t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
- cpu->isar.id_aa64isar1 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64ISAR1, JSCVT, 0);
- t = cpu->isar.id_aa64pfr0;
- t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
- cpu->isar.id_aa64pfr0 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, FP, 0xf);
- u = cpu->isar.id_isar6;
+ u = GET_IDREG(isar, ID_ISAR6);
u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
- cpu->isar.id_isar6 = u;
+ SET_IDREG(isar, ID_ISAR6, u);
u = cpu->isar.mvfr0;
u = FIELD_DP32(u, MVFR0, FPSP, 0);
@@ -2173,7 +2171,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
unset_feature(env, ARM_FEATURE_NEON);
- t = cpu->isar.id_aa64isar0;
+ t = GET_IDREG(isar, ID_AA64ISAR0);
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 0);
@@ -2181,32 +2179,30 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
- cpu->isar.id_aa64isar0 = t;
+ SET_IDREG(isar, ID_AA64ISAR0, t);
- t = cpu->isar.id_aa64isar1;
+ t = GET_IDREG(isar, ID_AA64ISAR1);
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
- cpu->isar.id_aa64isar1 = t;
+ SET_IDREG(isar, ID_AA64ISAR1, t);
- t = cpu->isar.id_aa64pfr0;
- t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
- cpu->isar.id_aa64pfr0 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, ADVSIMD, 0xf);
- u = cpu->isar.id_isar5;
+ u = GET_IDREG(isar, ID_ISAR5);
u = FIELD_DP32(u, ID_ISAR5, AES, 0);
u = FIELD_DP32(u, ID_ISAR5, SHA1, 0);
u = FIELD_DP32(u, ID_ISAR5, SHA2, 0);
u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
- cpu->isar.id_isar5 = u;
+ SET_IDREG(isar, ID_ISAR5, u);
- u = cpu->isar.id_isar6;
+ u = GET_IDREG(isar, ID_ISAR6);
u = FIELD_DP32(u, ID_ISAR6, DP, 0);
u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
- cpu->isar.id_isar6 = u;
+ SET_IDREG(isar, ID_ISAR6, u);
if (!arm_feature(env, ARM_FEATURE_M)) {
u = cpu->isar.mvfr1;
@@ -2223,16 +2219,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (!cpu->has_neon && !cpu->has_vfp) {
- uint64_t t;
uint32_t u;
- t = cpu->isar.id_aa64isar0;
- t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
- cpu->isar.id_aa64isar0 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64ISAR0, FHM, 0);
- t = cpu->isar.id_aa64isar1;
- t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
- cpu->isar.id_aa64isar1 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64ISAR1, FRINTTS, 0);
u = cpu->isar.mvfr0;
u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
@@ -2249,19 +2240,17 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
unset_feature(env, ARM_FEATURE_THUMB_DSP);
- u = cpu->isar.id_isar1;
- u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
- cpu->isar.id_isar1 = u;
+ FIELD_DP32_IDREG(isar, ID_ISAR1, EXTEND, 1);
- u = cpu->isar.id_isar2;
+ u = GET_IDREG(isar, ID_ISAR2);
u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
- cpu->isar.id_isar2 = u;
+ SET_IDREG(isar, ID_ISAR2, u);
- u = cpu->isar.id_isar3;
+ u = GET_IDREG(isar, ID_ISAR3);
u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
- cpu->isar.id_isar3 = u;
+ SET_IDREG(isar, ID_ISAR3, u);
}
@@ -2336,14 +2325,12 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* Disable the security extension feature bits in the processor
* feature registers as well.
*/
- cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0);
- cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0);
- cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
- ID_AA64PFR0, EL3, 0);
+ FIELD_DP32_IDREG(isar, ID_PFR1, SECURITY, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, COPSDBG, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, EL3, 0);
/* Disable the realm management extension, which requires EL3. */
- cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
- ID_AA64PFR0, RME, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, RME, 0);
}
if (!cpu->has_el2) {
@@ -2366,9 +2353,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
cpu);
#endif
} else {
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
- cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, PMUVER, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, PERFMON, 0);
cpu->pmceid0 = 0;
cpu->pmceid1 = 0;
}
@@ -2378,10 +2364,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* Disable the hypervisor feature bits in the processor feature
* registers if we don't have EL2.
*/
- cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
- ID_AA64PFR0, EL2, 0);
- cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1,
- ID_PFR1, VIRTUALIZATION, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, EL2, 0);
+ FIELD_DP32_IDREG(isar, ID_PFR1, VIRTUALIZATION, 0);
}
if (cpu_isar_feature(aa64_mte, cpu)) {
@@ -2400,8 +2384,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* This matches Cortex-A710 BROADCASTMTE input being LOW.
*/
if (tcg_enabled() && cpu->tag_memory == NULL) {
- cpu->isar.id_aa64pfr1 =
- FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR1, MTE, 1);
}
/*
@@ -2409,7 +2392,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* enabled on the guest (i.e mte=off), clear guest's MTE bits."
*/
if (kvm_enabled() && !cpu->kvm_mte) {
- FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR1, MTE, 0);
}
#endif
}
@@ -2429,32 +2412,22 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* try to access the non-existent system registers for them.
*/
/* FEAT_SPE (Statistical Profiling Extension) */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, PMSVER, 0);
/* FEAT_TRBE (Trace Buffer Extension) */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEBUFFER, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEBUFFER, 0);
/* FEAT_TRF (Self-hosted Trace Extension) */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEFILT, 0);
- cpu->isar.id_dfr0 =
- FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, TRACEFILT, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEFILT, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, TRACEFILT, 0);
/* Trace Macrocell system register access */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEVER, 0);
- cpu->isar.id_dfr0 =
- FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPTRC, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEVER, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, COPTRC, 0);
/* Memory mapped trace */
- cpu->isar.id_dfr0 =
- FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, MMAPTRC, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, MMAPTRC, 0);
/* FEAT_AMU (Activity Monitors Extension) */
- cpu->isar.id_aa64pfr0 =
- FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, AMU, 0);
- cpu->isar.id_pfr0 =
- FIELD_DP32(cpu->isar.id_pfr0, ID_PFR0, AMU, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, AMU, 0);
+ FIELD_DP32_IDREG(isar, ID_PFR0, AMU, 0);
/* FEAT_MPAM (Memory Partitioning and Monitoring Extension) */
- cpu->isar.id_aa64pfr0 =
- FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, MPAM, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, MPAM, 0);
}
/* MPU can be configured out of a PMSA CPU either by setting has-mpu
@@ -2647,13 +2620,54 @@ static const gchar *arm_gdb_arch_name(CPUState *cs)
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
+ if (arm_gdbstub_is_aarch64(cpu)) {
+ return "aarch64";
+ }
if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
return "iwmmxt";
}
return "arm";
}
-#ifndef CONFIG_USER_ONLY
+static const char *arm_gdb_get_core_xml_file(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (arm_gdbstub_is_aarch64(cpu)) {
+ return "aarch64-core.xml";
+ }
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return "arm-m-profile.xml";
+ }
+ return "arm-core.xml";
+}
+
+#ifdef CONFIG_USER_ONLY
+/**
+ * aarch64_untagged_addr:
+ *
+ * Remove any address tag from @x. This is explicitly related to the
+ * Linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
+ *
+ * There should be a better place to put this, but it is needed by
+ * include/accel/tcg/cpu-ldst.h, so it cannot live somewhere
+ * linux-user specific.
+ *
+ * Note that arm-*-user will never set tagged_addr_enable.
+ */
+static vaddr aarch64_untagged_addr(CPUState *cs, vaddr x)
+{
+ CPUARMState *env = cpu_env(cs);
+ if (env->tagged_addr_enable) {
+ /*
+ * TBI is enabled for userspace but not kernelspace addresses.
+ * Only clear the tag if bit 55 is clear.
+ */
+ x &= sextract64(x, 0, 56);
+ }
+ return x;
+}
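The bit-55 rule can be checked in isolation: sextract64(x, 0, 56) sign-extends bit 55 across the top byte, so the AND strips the tag only from user-half addresses and leaves kernel-half addresses intact. A self-contained sketch (sextract64 modelled locally; the sample addresses are arbitrary):

    #include <assert.h>
    #include <stdint.h>

    /* Local model of QEMU's sextract64(value, start, length). */
    static int64_t sextract64(uint64_t v, int start, int len)
    {
        return (int64_t)(v << (64 - len - start)) >> (64 - len);
    }

    int main(void)
    {
        uint64_t user = 0x5a00007fdeadbeefull; /* bit 55 clear: tag stripped */
        uint64_t kern = 0x5affff80cafef00dull; /* bit 55 set: left untouched */
        assert((user & (uint64_t)sextract64(user, 0, 56)) == 0x00007fdeadbeefull);
        assert((kern & (uint64_t)sextract64(kern, 0, 56)) == kern);
        return 0;
    }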
+#else
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps arm_sysemu_ops = {
@@ -2668,20 +2682,52 @@ static const struct SysemuCPUOps arm_sysemu_ops = {
#endif
#ifdef CONFIG_TCG
+#ifndef CONFIG_USER_ONLY
+static vaddr aprofile_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ /*
+ * The Stage2 and Phys indexes are only used for ptw on arm32,
+ * and all pte's are aligned, so we never produce a wrap for these.
+ * Double check that we're not truncating a 40-bit physical address.
+ */
+ assert((unsigned)mmu_idx < (ARMMMUIdx_Stage2_S & ARM_MMU_IDX_COREIDX_MASK));
+
+ if (!is_a64(cpu_env(cs))) {
+ return (uint32_t)result;
+ }
+
+ /*
+ * TODO: For FEAT_CPA2, decide how we want to resolve
+ * Unpredictable_CPACHECK in AddressIncrement.
+ */
+ return result;
+}
+#endif /* !CONFIG_USER_ONLY */
+
static const TCGCPUOps arm_tcg_ops = {
+ .mttcg_supported = true,
+ /* ARM processors have a weak memory model */
+ .guest_default_memory_order = 0,
+
.initialize = arm_translate_init,
.translate_code = arm_translate_code,
+ .get_tb_cpu_state = arm_get_tb_cpu_state,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,
+ .mmu_index = arm_cpu_mmu_index,
#ifdef CONFIG_USER_ONLY
.record_sigsegv = arm_cpu_record_sigsegv,
.record_sigbus = arm_cpu_record_sigbus,
+ .untagged_addr = aarch64_untagged_addr,
#else
.tlb_fill_align = arm_cpu_tlb_fill_align,
+ .pointer_wrap = aprofile_pointer_wrap,
.cpu_exec_interrupt = arm_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = arm_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
@@ -2692,7 +2738,7 @@ static const TCGCPUOps arm_tcg_ops = {
};
#endif /* CONFIG_TCG */
-static void arm_cpu_class_init(ObjectClass *oc, void *data)
+static void arm_cpu_class_init(ObjectClass *oc, const void *data)
{
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(acc);
@@ -2708,7 +2754,6 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
&acc->parent_phases);
cc->class_by_name = arm_cpu_class_by_name;
- cc->mmu_index = arm_cpu_mmu_index;
cc->dump_state = arm_cpu_dump_state;
cc->set_pc = arm_cpu_set_pc;
cc->get_pc = arm_cpu_get_pc;
@@ -2718,6 +2763,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->sysemu_ops = &arm_sysemu_ops;
#endif
cc->gdb_arch_name = arm_gdb_arch_name;
+ cc->gdb_get_core_xml_file = arm_gdb_get_core_xml_file;
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = arm_disas_set_info;
@@ -2734,13 +2780,12 @@ static void arm_cpu_instance_init(Object *obj)
arm_cpu_post_init(obj);
}
-static void cpu_register_class_init(ObjectClass *oc, void *data)
+static void cpu_register_class_init(ObjectClass *oc, const void *data)
{
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(acc);
acc->info = data;
- cc->gdb_core_xml_file = "arm-core.xml";
if (acc->info->deprecation_note) {
cc->deprecation_note = acc->info->deprecation_note;
}
@@ -2752,7 +2797,7 @@ void arm_cpu_register(const ARMCPUInfo *info)
.parent = TYPE_ARM_CPU,
.instance_init = arm_cpu_instance_init,
.class_init = info->class_init ?: cpu_register_class_init,
- .class_data = (void *)info,
+ .class_data = info,
};
type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index a8177c6..dc9b6dc 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -24,16 +24,15 @@
#include "qemu/cpu-float.h"
#include "hw/registerfields.h"
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "exec/gdbstub.h"
#include "exec/page-protection.h"
#include "qapi/qapi-types-common.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
-
-#ifdef TARGET_AARCH64
-#define KVM_HAVE_MCE_INJECTION 1
-#endif
+#include "target/arm/cpu-sysregs.h"
#define EXCP_UDEF 1 /* undefined instruction */
#define EXCP_SWI 2 /* software interrupt */
@@ -100,12 +99,6 @@
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif
-/* ARM-specific extra insn start words:
- * 1: Conditional execution bits
- * 2: Partial exception syndrome for data aborts
- */
-#define TARGET_INSN_START_EXTRA_WORDS 2
-
/* The 2nd extra word holding syndrome info for data aborts does not use
* the upper 6 bits nor the lower 13 bits. We mask and shift it down to
* help the sleb128 encoder do a better job.
@@ -171,17 +164,12 @@ typedef struct ARMGenericTimer {
* Align the data for use with TCG host vector operations.
*/
-#ifdef TARGET_AARCH64
-# define ARM_MAX_VQ 16
-#else
-# define ARM_MAX_VQ 1
-#endif
+#define ARM_MAX_VQ 16
typedef struct ARMVectorReg {
uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16);
} ARMVectorReg;
-#ifdef TARGET_AARCH64
/* In AArch32 mode, predicate registers do not exist at all. */
typedef struct ARMPredicateReg {
uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16);
@@ -191,12 +179,11 @@ typedef struct ARMPredicateReg {
typedef struct ARMPACKey {
uint64_t lo, hi;
} ARMPACKey;
-#endif
/* See the commentary above the TBFLAG field definitions. */
typedef struct CPUARMTBFlags {
uint32_t flags;
- target_ulong flags2;
+ uint64_t flags2;
} CPUARMTBFlags;
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
@@ -220,6 +207,8 @@ typedef struct NVICState NVICState;
* when FPCR.AH == 1 (bfloat16 conversions and multiplies,
* and the reciprocal and square root estimate/step insns);
* for half-precision
+ * ZA: the "streaming SVE" fp status.
+ * ZA_F16: likewise for half-precision.
*
* Half-precision operations are governed by a separate
* flush-to-zero control bit in FPSCR:FZ16. We pass a separate
@@ -240,6 +229,12 @@ typedef struct NVICState NVICState;
* they ignore FPCR.RMode. But they don't ignore FPCR.FZ16,
* which means we need an FPST_AH_F16 as well.
*
+ * The "ZA" float_status are for Streaming SVE operations which use
+ * default-NaN and do not generate fp exceptions, which means that they
+ * do not accumulate exception bits back into FPCR.
+ * See e.g. FPAdd vs FPAdd_ZA pseudocode functions, and the setting
+ * of fpcr.DN and fpexec parameters.
+ *
* To avoid having to transfer exception bits around, we simply
* say that the FPSCR cumulative exception flags are the logical
* OR of the flags in the four fp statuses. This relies on the
@@ -253,10 +248,12 @@ typedef enum ARMFPStatusFlavour {
FPST_A64_F16,
FPST_AH,
FPST_AH_F16,
+ FPST_ZA,
+ FPST_ZA_F16,
FPST_STD,
FPST_STD_F16,
} ARMFPStatusFlavour;
-#define FPST_COUNT 8
+#define FPST_COUNT 10
typedef struct CPUArchState {
/* Regs for current mode. */
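With the two new enumerators in place, a streaming-SVE operation picks its status by index like any other flavour; a minimal sketch of the kind of selector an SME helper might use (the function name is hypothetical):

    /* Hypothetical selector: FPST_ZA vs FPST_ZA_F16 for a ZA fp op. */
    static inline float_status *za_fpst(CPUARMState *env, bool is_f16)
    {
        return &env->vfp.fp_status[is_f16 ? FPST_ZA_F16 : FPST_ZA];
    }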
@@ -662,13 +659,11 @@ typedef struct CPUArchState {
struct {
ARMVectorReg zregs[32];
-#ifdef TARGET_AARCH64
/* Store FFR as pregs[16] to make it easier to treat as any other. */
#define FFR_PRED_NUM 16
ARMPredicateReg pregs[17];
/* Scratch space for aa64 sve predicate temporary. */
ARMPredicateReg preg_tmp;
-#endif
/* We store these fpcsr fields separately for convenience. */
uint32_t qc[4] QEMU_ALIGNED(16);
@@ -684,9 +679,6 @@ typedef struct CPUArchState {
uint32_t xregs[16];
- /* Scratch space for aa32 neon expansion. */
- uint32_t scratch[8];
-
/* There are a number of distinct float control structures. */
float_status fp_status[FPST_COUNT];
@@ -713,7 +705,6 @@ typedef struct CPUArchState {
uint32_t cregs[16];
} iwmmxt;
-#ifdef TARGET_AARCH64
struct {
ARMPACKey apia;
ARMPACKey apib;
@@ -724,28 +715,36 @@ typedef struct CPUArchState {
uint64_t scxtnum_el[4];
- /*
- * SME ZA storage -- 256 x 256 byte array, with bytes in host word order,
- * as we do with vfp.zregs[]. This corresponds to the architectural ZA
- * array, where ZA[N] is in the least-significant bytes of env->zarray[N].
- * When SVL is less than the architectural maximum, the accessible
- * storage is restricted, such that if the SVL is X bytes the guest can
- * see only the bottom X elements of zarray[], and only the least
- * significant X bytes of each element of the array. (In other words,
- * the observable part is always square.)
- *
- * The ZA storage can also be considered as a set of square tiles of
- * elements of different sizes. The mapping from tiles to the ZA array
- * is architecturally defined, such that for tiles of elements of esz
- * bytes, the Nth row (or "horizontal slice") of tile T is in
- * ZA[T + N * esz]. Note that this means that each tile is not contiguous
- * in the ZA storage, because its rows are striped through the ZA array.
- *
- * Because this is so large, keep this toward the end of the reset area,
- * to keep the offsets into the rest of the structure smaller.
- */
- ARMVectorReg zarray[ARM_MAX_VQ * 16];
-#endif
+ struct {
+ /* SME2 ZT0 -- 512 bit array, with data ordered like ARMVectorReg. */
+ uint64_t zt0[512 / 64] QEMU_ALIGNED(16);
+
+ /*
+ * SME ZA storage -- 256 x 256 byte array, with bytes in host
+ * word order, as we do with vfp.zregs[]. This corresponds to
+ * the architectural ZA array, where ZA[N] is in the least
+ * significant bytes of env->za_state.za[N].
+ *
+ * When SVL is less than the architectural maximum, the accessible
+ * storage is restricted, such that if the SVL is X bytes the guest
+ * can see only the bottom X elements of zarray[], and only the least
+ * significant X bytes of each element of the array. (In other words,
+ * the observable part is always square.)
+ *
+ * The ZA storage can also be considered as a set of square tiles of
+ * elements of different sizes. The mapping from tiles to the ZA array
+ * is architecturally defined, such that for tiles of elements of esz
+ * bytes, the Nth row (or "horizontal slice") of tile T is in
+ * ZA[T + N * esz]. Note that this means that each tile is not
+ * contiguous in the ZA storage, because its rows are striped through
+ * the ZA array.
+ *
+ * Because this is so large, keep this toward the end of the
+ * reset area, to keep the offsets into the rest of the structure
+ * smaller.
+ */
+ ARMVectorReg za[ARM_MAX_VQ * 16];
+ } za_state;
struct CPUBreakpoint *cpu_breakpoint[16];
struct CPUWatchpoint *cpu_watchpoint[16];
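The striping rule in the comment above (row N of tile T is ZA[T + N * esz]) is worth a concrete instance; a small sketch with a hypothetical helper name:

    /* Hypothetical helper: za_state.za[] row holding row N of tile T,
     * for tiles of esz-byte elements. */
    static inline int za_tile_row(int tile, int row, int esz)
    {
        return tile + row * esz;
    }

    /* e.g. tile 3 with 64-bit elements (esz = 8):
     * row 0 -> za[3], row 1 -> za[11], row 2 -> za[19], ... */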
@@ -801,12 +800,9 @@ typedef struct CPUArchState {
#else /* CONFIG_USER_ONLY */
/* For usermode syscall translation. */
bool eabi;
-#endif /* CONFIG_USER_ONLY */
-
-#ifdef TARGET_TAGGED_ADDRESSES
/* Linux syscall tagged address support */
bool tagged_addr_enable;
-#endif
+#endif /* CONFIG_USER_ONLY */
} CPUARMState;
static inline void set_feature(CPUARMState *env, int feature)
@@ -855,6 +851,53 @@ typedef struct {
uint32_t map, init, supported;
} ARMVQMap;
+/* REG is ID_XXX */
+#define FIELD_DP64_IDREG(ISAR, REG, FIELD, VALUE) \
+ ({ \
+ ARMISARegisters *i_ = (ISAR); \
+ uint64_t regval = i_->idregs[REG ## _EL1_IDX]; \
+ regval = FIELD_DP64(regval, REG, FIELD, VALUE); \
+ i_->idregs[REG ## _EL1_IDX] = regval; \
+ })
+
+#define FIELD_DP32_IDREG(ISAR, REG, FIELD, VALUE) \
+ ({ \
+ ARMISARegisters *i_ = (ISAR); \
+ uint64_t regval = i_->idregs[REG ## _EL1_IDX]; \
+ regval = FIELD_DP32(regval, REG, FIELD, VALUE); \
+ i_->idregs[REG ## _EL1_IDX] = regval; \
+ })
+
+#define FIELD_EX64_IDREG(ISAR, REG, FIELD) \
+ ({ \
+ const ARMISARegisters *i_ = (ISAR); \
+ FIELD_EX64(i_->idregs[REG ## _EL1_IDX], REG, FIELD); \
+ })
+
+#define FIELD_EX32_IDREG(ISAR, REG, FIELD) \
+ ({ \
+ const ARMISARegisters *i_ = (ISAR); \
+ FIELD_EX32(i_->idregs[REG ## _EL1_IDX], REG, FIELD); \
+ })
+
+#define FIELD_SEX64_IDREG(ISAR, REG, FIELD) \
+ ({ \
+ const ARMISARegisters *i_ = (ISAR); \
+ FIELD_SEX64(i_->idregs[REG ## _EL1_IDX], REG, FIELD); \
+ })
+
+#define SET_IDREG(ISAR, REG, VALUE) \
+ ({ \
+ ARMISARegisters *i_ = (ISAR); \
+ i_->idregs[REG ## _EL1_IDX] = VALUE; \
+ })
+
+#define GET_IDREG(ISAR, REG) \
+ ({ \
+ const ARMISARegisters *i_ = (ISAR); \
+ i_->idregs[REG ## _EL1_IDX]; \
+ })
+
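
These accessors replace direct struct-field access with lookups into the new idregs[] array; note that they rely on GCC/Clang statement expressions (the ({ ... }) form), which QEMU already uses elsewhere. A hypothetical usage fragment, mirroring the cpu_arm_set_sve() change later in this patch:

uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0);             /* whole register */
unsigned sve = FIELD_EX64_IDREG(&cpu->isar, ID_AA64PFR0, SVE);  /* one field */
FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, SVE, sve ? 1 : 0);    /* read-modify-write */
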
/**
* ARMCPU:
* @env: #CPUARMState
@@ -973,7 +1016,6 @@ struct ArchCPU {
*/
uint32_t kvm_target;
-#ifdef CONFIG_KVM
/* KVM init features for this CPU */
uint32_t kvm_init_features[7];
@@ -986,7 +1028,6 @@ struct ArchCPU {
/* KVM steal time */
OnOffAuto kvm_steal_time;
-#endif /* CONFIG_KVM */
/* Uniprocessor system with MP extensions */
bool mp_is_up;
@@ -1025,44 +1066,14 @@ struct ArchCPU {
* field by reading the value from the KVM vCPU.
*/
struct ARMISARegisters {
- uint32_t id_isar0;
- uint32_t id_isar1;
- uint32_t id_isar2;
- uint32_t id_isar3;
- uint32_t id_isar4;
- uint32_t id_isar5;
- uint32_t id_isar6;
- uint32_t id_mmfr0;
- uint32_t id_mmfr1;
- uint32_t id_mmfr2;
- uint32_t id_mmfr3;
- uint32_t id_mmfr4;
- uint32_t id_mmfr5;
- uint32_t id_pfr0;
- uint32_t id_pfr1;
- uint32_t id_pfr2;
uint32_t mvfr0;
uint32_t mvfr1;
uint32_t mvfr2;
- uint32_t id_dfr0;
- uint32_t id_dfr1;
uint32_t dbgdidr;
uint32_t dbgdevid;
uint32_t dbgdevid1;
- uint64_t id_aa64isar0;
- uint64_t id_aa64isar1;
- uint64_t id_aa64isar2;
- uint64_t id_aa64pfr0;
- uint64_t id_aa64pfr1;
- uint64_t id_aa64mmfr0;
- uint64_t id_aa64mmfr1;
- uint64_t id_aa64mmfr2;
- uint64_t id_aa64mmfr3;
- uint64_t id_aa64dfr0;
- uint64_t id_aa64dfr1;
- uint64_t id_aa64zfr0;
- uint64_t id_aa64smfr0;
uint64_t reset_pmcr_el0;
+ uint64_t idregs[NUM_ID_IDX];
} isar;
uint64_t midr;
uint32_t revidr;
@@ -1071,10 +1082,6 @@ struct ArchCPU {
uint32_t reset_sctlr;
uint64_t pmceid0;
uint64_t pmceid1;
- uint32_t id_afr0;
- uint64_t id_aa64afr0;
- uint64_t id_aa64afr1;
- uint64_t clidr;
uint64_t mp_affinity; /* MP ID without feature bits */
/* The elements of this array are the CCSIDR values for each cache,
* in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
@@ -1125,6 +1132,7 @@ struct ArchCPU {
/* Used to set the maximum vector length the cpu will support. */
uint32_t sve_max_vq;
+ uint32_t sme_max_vq;
#ifdef CONFIG_USER_ONLY
/* Used to set the default vector length at process start. */
@@ -1143,7 +1151,7 @@ typedef struct ARMCPUInfo {
const char *name;
const char *deprecation_note;
void (*initfn)(Object *obj);
- void (*class_init)(ObjectClass *oc, void *data);
+ void (*class_init)(ObjectClass *oc, const void *data);
} ARMCPUInfo;
/**
@@ -1161,10 +1169,6 @@ struct ARMCPUClass {
ResettablePhases parent_phases;
};
-struct AArch64CPUClass {
- ARMCPUClass parent_class;
-};
-
/* Callback functions for the generic timer's timers. */
void arm_gt_ptimer_cb(void *opaque);
void arm_gt_vtimer_cb(void *opaque);
@@ -1177,8 +1181,6 @@ void arm_gt_sel2vtimer_cb(void *opaque);
unsigned int gt_cntfrq_period_ns(ARMCPU *cpu);
void gt_rme_post_el_change(ARMCPU *cpu, void *opaque);
-void arm_cpu_post_init(Object *obj);
-
#define ARM_AFF0_SHIFT 0
#define ARM_AFF0_MASK (0xFFULL << ARM_AFF0_SHIFT)
#define ARM_AFF1_SHIFT 8
@@ -1236,7 +1238,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
*/
void arm_emulate_firmware_reset(CPUState *cpustate, int target_el);
-#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
@@ -1268,13 +1269,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
#endif
}
-#else
-static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
-static inline void aarch64_sve_change_el(CPUARMState *env, int o,
- int n, bool a)
-{ }
-#endif
-
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);
@@ -1516,6 +1510,7 @@ FIELD(SVCR, ZA, 1, 1)
/* Fields for SMCR_ELx. */
FIELD(SMCR, LEN, 0, 4)
+FIELD(SMCR, EZT0, 30, 1)
FIELD(SMCR, FA64, 31, 1)
/* Write a new value to v7m.exception, thus transitioning into or out
@@ -2217,6 +2212,7 @@ FIELD(ID_AA64ISAR2, SYSINSTR_128, 36, 4)
FIELD(ID_AA64ISAR2, PRFMSLC, 40, 4)
FIELD(ID_AA64ISAR2, RPRFM, 48, 4)
FIELD(ID_AA64ISAR2, CSSC, 52, 4)
+FIELD(ID_AA64ISAR2, LUT, 56, 4)
FIELD(ID_AA64ISAR2, ATS1A, 60, 4)
FIELD(ID_AA64PFR0, EL0, 0, 4)
@@ -2948,7 +2944,7 @@ static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu)
/* If all the CLIDR.Ctypem bits are 0 there are no caches, and
* CSSELR is RAZ/WI.
*/
- return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0;
+ return (GET_IDREG(&cpu->isar, CLIDR) & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0;
}
static inline bool arm_sctlr_b(CPUARMState *env)
@@ -2966,19 +2962,15 @@ static inline bool arm_sctlr_b(CPUARMState *env)
uint64_t arm_sctlr(CPUARMState *env, int el);
-#include "exec/cpu-all.h"
-
/*
* We have more than 32-bits worth of state per TB, so we split the data
* between tb->flags and tb->cs_base, which is otherwise unused for ARM.
* We collect these two parts in CPUARMTBFlags where they are named
* flags and flags2 respectively.
*
- * The flags that are shared between all execution modes, TBFLAG_ANY,
- * are stored in flags. The flags that are specific to a given mode
- * are stores in flags2. Since cs_base is sized on the configured
- * address size, flags2 always has 64-bits for A64, and a minimum of
- * 32-bits for A32 and M32.
+ * The flags that are shared between all execution modes, TBFLAG_ANY, are stored
+ * in flags. The flags that are specific to a given mode are stored in flags2.
+ * flags2 always has 64 bits, even though only 32 bits are used for A32 and M32.
*
* The bits for 32-bit A-profile and M-profile partially overlap:
*
@@ -3090,6 +3082,7 @@ FIELD(TBFLAG_A64, NV2_MEM_E20, 35, 1)
FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1)
FIELD(TBFLAG_A64, AH, 37, 1) /* FPCR.AH */
FIELD(TBFLAG_A64, NEP, 38, 1) /* FPCR.NEP */
+FIELD(TBFLAG_A64, ZT0EXC_EL, 39, 2)
/*
* Helpers for using the above. Note that only the A64 accessors use
@@ -3151,9 +3144,6 @@ static inline bool bswap_code(bool sctlr_b)
#endif
}
-void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags);
-
enum {
QEMU_PSCI_CONDUIT_DISABLED = 0,
QEMU_PSCI_CONDUIT_SMC = 1,
@@ -3251,35 +3241,4 @@ extern const uint64_t pred_esz_masks[5];
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
-#ifdef CONFIG_USER_ONLY
-
-#define TARGET_PAGE_DATA_SIZE (TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1))
-
-#ifdef TARGET_TAGGED_ADDRESSES
-/**
- * cpu_untagged_addr:
- * @cs: CPU context
- * @x: tagged address
- *
- * Remove any address tag from @x. This is explicitly related to the
- * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
- *
- * There should be a better place to put this, but we need this in
- * include/exec/cpu_ldst.h, and not some place linux-user specific.
- */
-static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x)
-{
- CPUARMState *env = cpu_env(cs);
- if (env->tagged_addr_enable) {
- /*
- * TBI is enabled for userspace but not kernelspace addresses.
- * Only clear the tag if bit 55 is clear.
- */
- x &= sextract64(x, 0, 56);
- }
- return x;
-}
-#endif /* TARGET_TAGGED_ADDRESSES */
-#endif /* CONFIG_USER_ONLY */
-
#endif
diff --git a/target/arm/cpu32-stubs.c b/target/arm/cpu32-stubs.c
new file mode 100644
index 0000000..81be44d
--- /dev/null
+++ b/target/arm/cpu32-stubs.c
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include "qemu/osdep.h"
+#include "target/arm/cpu.h"
+#include "target/arm/internals.h"
+#include <glib.h>
+
+void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 8188ede..26cf7e6 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -36,6 +36,28 @@
#include "cpu-features.h"
#include "cpregs.h"
+/* convert between <register>_IDX and SYS_<register> */
+#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \
+ [NAME##_IDX] = SYS_##NAME,
+
+const uint32_t id_register_sysreg[NUM_ID_IDX] = {
+#include "cpu-sysregs.h.inc"
+};
+
+#undef DEF
+#define DEF(NAME, OP0, OP1, CRN, CRM, OP2) \
+ case SYS_##NAME: return NAME##_IDX;
+
+int get_sysreg_idx(ARMSysRegs sysreg)
+{
+ switch (sysreg) {
+#include "cpu-sysregs.h.inc"
+ }
+ g_assert_not_reached();
+}
+
+#undef DEF
+
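
The paired #define DEF / #include / #undef DEF above is a classic X-macro: the single register list in cpu-sysregs.h.inc is expanded once into an array initializer and once into switch cases, so the two mappings can never drift apart. A self-contained toy version of the same pattern (toy names, not the real register list):

#include <stdio.h>

/* In the real patch the list lives in cpu-sysregs.h.inc; inlined here. */
#define TOY_LIST(X) \
    X(ALPHA, 0x10)  \
    X(BETA,  0x20)

/* First expansion: build the index enum. */
#define DEF(NAME, CODE) NAME##_IDX,
enum { TOY_LIST(DEF) NUM_TOY_IDX };
#undef DEF

/* Second expansion: build the index -> code table. */
#define DEF(NAME, CODE) [NAME##_IDX] = CODE,
static const int toy_code[NUM_TOY_IDX] = { TOY_LIST(DEF) };
#undef DEF

int main(void)
{
    printf("BETA -> 0x%x\n", toy_code[BETA_IDX]);   /* prints 0x20 */
    return 0;
}
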
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
{
/*
@@ -114,7 +136,7 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
* SVE is disabled and so are all vector lengths. Good.
* Disable all SVE extensions as well.
*/
- cpu->isar.id_aa64zfr0 = 0;
+ SET_IDREG(&cpu->isar, ID_AA64ZFR0, 0);
return;
}
@@ -237,6 +259,13 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
/* From now on sve_max_vq is the actual maximum supported length. */
cpu->sve_max_vq = max_vq;
cpu->sve_vq.map = vq_map;
+
+ /* FEAT_F64MM requires the existence of a 256-bit vector size. */
+ if (max_vq < 2) {
+ uint64_t t = GET_IDREG(&cpu->isar, ID_AA64ZFR0);
+ t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 0);
+ SET_IDREG(&cpu->isar, ID_AA64ZFR0, t);
+ }
}
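
A sketch of the vq_map bit convention this function relies on (bit N set means a vector length of N+1 quadwords, i.e. (N+1)*128 bits, is supported), which is why max_vq < 2 implies no 256-bit length exists and forces ID_AA64ZFR0.F64MM to zero:

#include <stdbool.h>
#include <stdint.h>

/* bit (vq - 1) of the map set <=> vq quadwords (vq * 128 bits) supported */
static bool vq_supported(uint32_t vq_map, unsigned vq)
{
    return vq >= 1 && vq <= 16 && (vq_map & (1u << (vq - 1)));
}

int main(void)
{
    uint32_t map = 1u << 1;                  /* only vq 2 (256 bits) */
    return vq_supported(map, 2) && !vq_supported(map, 1) ? 0 : 1;
}
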
/*
@@ -288,16 +317,13 @@ static bool cpu_arm_get_sve(Object *obj, Error **errp)
static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
- uint64_t t;
if (value && kvm_enabled() && !kvm_arm_sve_supported()) {
error_setg(errp, "'sve' feature not supported by KVM on this host");
return;
}
- t = cpu->isar.id_aa64pfr0;
- t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
- cpu->isar.id_aa64pfr0 = t;
+ FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, SVE, value);
}
void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp)
@@ -309,7 +335,7 @@ void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp)
if (vq_map == 0) {
if (!cpu_isar_feature(aa64_sme, cpu)) {
- cpu->isar.id_aa64smfr0 = 0;
+ SET_IDREG(&cpu->isar, ID_AA64SMFR0, 0);
return;
}
@@ -337,6 +363,7 @@ void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp)
}
cpu->sme_vq.map = vq_map;
+ cpu->sme_max_vq = 32 - clz32(vq_map);
}
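
The new sme_max_vq assignment recovers the highest supported vq from the bitmap: bit N stands for vq N+1, the most-significant set bit has index 31 - clz32(map), hence vq = 32 - clz32(map). A standalone check (clz32 modeled here with the GCC/Clang builtin; QEMU's clz32 behaves the same for nonzero input):

#include <assert.h>
#include <stdint.h>

static unsigned clz32(uint32_t x)
{
    return x ? (unsigned)__builtin_clz(x) : 32;   /* GCC/Clang builtin */
}

int main(void)
{
    uint32_t vq_map = (1u << 0) | (1u << 3);   /* vq 1 and vq 4 supported */
    assert(32 - clz32(vq_map) == 4);           /* sme_max_vq would be 4 */
    return 0;
}
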
static bool cpu_arm_get_sme(Object *obj, Error **errp)
@@ -348,11 +375,8 @@ static bool cpu_arm_get_sme(Object *obj, Error **errp)
static void cpu_arm_set_sme(Object *obj, bool value, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
- uint64_t t;
- t = cpu->isar.id_aa64pfr1;
- t = FIELD_DP64(t, ID_AA64PFR1, SME, value);
- cpu->isar.id_aa64pfr1 = t;
+ FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR1, SME, value);
}
static bool cpu_arm_get_sme_fa64(Object *obj, Error **errp)
@@ -365,11 +389,8 @@ static bool cpu_arm_get_sme_fa64(Object *obj, Error **errp)
static void cpu_arm_set_sme_fa64(Object *obj, bool value, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
- uint64_t t;
- t = cpu->isar.id_aa64smfr0;
- t = FIELD_DP64(t, ID_AA64SMFR0, FA64, value);
- cpu->isar.id_aa64smfr0 = t;
+ FIELD_DP64_IDREG(&cpu->isar, ID_AA64SMFR0, FA64, value);
}
#ifdef CONFIG_USER_ONLY
@@ -480,6 +501,7 @@ void aarch64_add_sme_properties(Object *obj)
void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
{
ARMPauthFeature features = cpu_isar_feature(pauth_feature, cpu);
+ ARMISARegisters *isar = &cpu->isar;
uint64_t isar1, isar2;
/*
@@ -490,13 +512,13 @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
*
* Begin by disabling all fields.
*/
- isar1 = cpu->isar.id_aa64isar1;
+ isar1 = GET_IDREG(isar, ID_AA64ISAR1);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, APA, 0);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPA, 0);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, API, 0);
isar1 = FIELD_DP64(isar1, ID_AA64ISAR1, GPI, 0);
- isar2 = cpu->isar.id_aa64isar2;
+ isar2 = GET_IDREG(isar, ID_AA64ISAR2);
isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, APA3, 0);
isar2 = FIELD_DP64(isar2, ID_AA64ISAR2, GPA3, 0);
@@ -558,8 +580,8 @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
}
}
- cpu->isar.id_aa64isar1 = isar1;
- cpu->isar.id_aa64isar2 = isar2;
+ SET_IDREG(isar, ID_AA64ISAR1, isar1);
+ SET_IDREG(isar, ID_AA64ISAR2, isar2);
}
static const Property arm_cpu_pauth_property =
@@ -606,17 +628,18 @@ void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp)
return;
}
- t = cpu->isar.id_aa64mmfr0;
+ t = GET_IDREG(&cpu->isar, ID_AA64MMFR0);
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 2); /* 16k pages w/ LPA2 */
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4, 1); /* 4k pages w/ LPA2 */
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 3); /* 16k stage2 w/ LPA2 */
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 3); /* 4k stage2 w/ LPA2 */
- cpu->isar.id_aa64mmfr0 = t;
+ SET_IDREG(&cpu->isar, ID_AA64MMFR0, t);
}
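
For orientation, TGRAN4 is a signed ID field where negative values mean 4k pages are unsupported and values >= 1 add the 52-bit (LPA2) variant, which is why the hunk writes 1. A hedged sketch of how a consumer could test the result with the FIELD_SEX64_IDREG accessor introduced earlier (cpu_has_4k_lpa2 is a hypothetical helper, not part of this patch):

/* Does this CPU, after finalization, offer 4k pages with LPA2? */
static bool cpu_has_4k_lpa2(ARMCPU *cpu)
{
    return FIELD_SEX64_IDREG(&cpu->isar, ID_AA64MMFR0, TGRAN4) >= 1;
}
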
static void aarch64_a57_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a57";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -637,30 +660,30 @@ static void aarch64_a57_initfn(Object *obj)
cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50838;
- cpu->isar.id_pfr0 = 0x00000131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x03010066;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10101105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00011142;
- cpu->isar.id_isar5 = 0x00011121;
- cpu->isar.id_isar6 = 0;
- cpu->isar.id_aa64pfr0 = 0x00002222;
- cpu->isar.id_aa64dfr0 = 0x10305106;
- cpu->isar.id_aa64isar0 = 0x00011120;
- cpu->isar.id_aa64mmfr0 = 0x00001124;
+ SET_IDREG(isar, ID_PFR0, 0x00000131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x03010066);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_MMFR0, 0x10101105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00011142);
+ SET_IDREG(isar, ID_ISAR5, 0x00011121);
+ SET_IDREG(isar, ID_ISAR6, 0);
+ SET_IDREG(isar, ID_AA64PFR0, 0x00002222);
+ SET_IDREG(isar, ID_AA64DFR0, 0x10305106);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x00011120);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x00001124);
cpu->isar.dbgdidr = 0x3516d000;
cpu->isar.dbgdevid = 0x01110f13;
cpu->isar.dbgdevid1 = 0x2;
cpu->isar.reset_pmcr_el0 = 0x41013000;
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, CLIDR, 0x0a200023);
/* 32KB L1 dcache */
cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
/* 48KB L1 icache */
@@ -678,6 +701,7 @@ static void aarch64_a57_initfn(Object *obj)
static void aarch64_a53_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a53";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -698,30 +722,30 @@ static void aarch64_a53_initfn(Object *obj)
cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x84448004; /* L1Ip = VIPT */
cpu->reset_sctlr = 0x00c50838;
- cpu->isar.id_pfr0 = 0x00000131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x03010066;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10101105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00011142;
- cpu->isar.id_isar5 = 0x00011121;
- cpu->isar.id_isar6 = 0;
- cpu->isar.id_aa64pfr0 = 0x00002222;
- cpu->isar.id_aa64dfr0 = 0x10305106;
- cpu->isar.id_aa64isar0 = 0x00011120;
- cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
+ SET_IDREG(isar, ID_PFR0, 0x00000131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x03010066);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_MMFR0, 0x10101105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00011142);
+ SET_IDREG(isar, ID_ISAR5, 0x00011121);
+ SET_IDREG(isar, ID_ISAR6, 0);
+ SET_IDREG(isar, ID_AA64PFR0, 0x00002222);
+ SET_IDREG(isar, ID_AA64DFR0, 0x10305106);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x00011120);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x00001122); /* 40 bit physical addr */
cpu->isar.dbgdidr = 0x3516d000;
cpu->isar.dbgdevid = 0x00110f13;
cpu->isar.dbgdevid1 = 0x1;
cpu->isar.reset_pmcr_el0 = 0x41033000;
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, CLIDR, 0x0a200023);
/* 32KB L1 dcache */
cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
/* 32KB L1 icache */
@@ -781,104 +805,12 @@ static const ARMCPUInfo aarch64_cpus[] = {
#endif
};
-static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
-{
- ARMCPU *cpu = ARM_CPU(obj);
-
- return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
-}
-
-static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
-{
- ARMCPU *cpu = ARM_CPU(obj);
-
- /* At this time, this property is only allowed if KVM is enabled. This
- * restriction allows us to avoid fixing up functionality that assumes a
- * uniform execution state like do_interrupt.
- */
- if (value == false) {
- if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
- error_setg(errp, "'aarch64' feature cannot be disabled "
- "unless KVM is enabled and 32-bit EL1 "
- "is supported");
- return;
- }
- unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
- } else {
- set_feature(&cpu->env, ARM_FEATURE_AARCH64);
- }
-}
-
-static void aarch64_cpu_finalizefn(Object *obj)
-{
-}
-
-static const gchar *aarch64_gdb_arch_name(CPUState *cs)
-{
- return "aarch64";
-}
-
-static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
-{
- CPUClass *cc = CPU_CLASS(oc);
-
- cc->gdb_read_register = aarch64_cpu_gdb_read_register;
- cc->gdb_write_register = aarch64_cpu_gdb_write_register;
- cc->gdb_core_xml_file = "aarch64-core.xml";
- cc->gdb_arch_name = aarch64_gdb_arch_name;
-
- object_class_property_add_bool(oc, "aarch64", aarch64_cpu_get_aarch64,
- aarch64_cpu_set_aarch64);
- object_class_property_set_description(oc, "aarch64",
- "Set on/off to enable/disable aarch64 "
- "execution state ");
-}
-
-static void aarch64_cpu_instance_init(Object *obj)
-{
- ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
-
- acc->info->initfn(obj);
- arm_cpu_post_init(obj);
-}
-
-static void cpu_register_class_init(ObjectClass *oc, void *data)
-{
- ARMCPUClass *acc = ARM_CPU_CLASS(oc);
-
- acc->info = data;
-}
-
-void aarch64_cpu_register(const ARMCPUInfo *info)
-{
- TypeInfo type_info = {
- .parent = TYPE_AARCH64_CPU,
- .instance_init = aarch64_cpu_instance_init,
- .class_init = info->class_init ?: cpu_register_class_init,
- .class_data = (void *)info,
- };
-
- type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
- type_register_static(&type_info);
- g_free((void *)type_info.name);
-}
-
-static const TypeInfo aarch64_cpu_type_info = {
- .name = TYPE_AARCH64_CPU,
- .parent = TYPE_ARM_CPU,
- .instance_finalize = aarch64_cpu_finalizefn,
- .abstract = true,
- .class_init = aarch64_cpu_class_init,
-};
-
static void aarch64_cpu_register_types(void)
{
size_t i;
- type_register_static(&aarch64_cpu_type_info);
-
for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
- aarch64_cpu_register(&aarch64_cpus[i]);
+ arm_cpu_register(&aarch64_cpus[i]);
}
}
diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c
index a9a619b..69fb1d0 100644
--- a/target/arm/debug_helper.c
+++ b/target/arm/debug_helper.c
@@ -11,10 +11,12 @@
#include "internals.h"
#include "cpu-features.h"
#include "cpregs.h"
-#include "exec/exec-all.h"
-#include "exec/helper-proto.h"
+#include "exec/watchpoint.h"
#include "system/tcg.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
#ifdef CONFIG_TCG
/* Return the Exception Level targeted by debug exceptions. */
static int arm_debug_target_el(CPUARMState *env)
@@ -378,7 +380,7 @@ bool arm_debug_check_breakpoint(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
- target_ulong pc;
+ vaddr pc;
int n;
/*
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
index 30068c2..ce4497a 100644
--- a/target/arm/gdbstub.c
+++ b/target/arm/gdbstub.c
@@ -44,6 +44,12 @@ int arm_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
+#ifdef TARGET_AARCH64
+ if (arm_gdbstub_is_aarch64(cpu)) {
+ return aarch64_cpu_gdb_read_register(cs, mem_buf, n);
+ }
+#endif
+
if (n < 16) {
/* Core integer register. */
return gdb_get_reg32(mem_buf, env->regs[n]);
@@ -66,6 +72,12 @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
CPUARMState *env = &cpu->env;
uint32_t tmp;
+#ifdef TARGET_AARCH64
+ if (arm_gdbstub_is_aarch64(cpu)) {
+ return aarch64_cpu_gdb_write_register(cs, mem_buf, n);
+ }
+#endif
+
tmp = ldl_p(mem_buf);
/*
diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
index 1a4dbec..64ee9b3 100644
--- a/target/arm/gdbstub64.c
+++ b/target/arm/gdbstub64.c
@@ -27,6 +27,10 @@
#include <sys/prctl.h>
#include "mte_user_helper.h"
#endif
+#ifdef CONFIG_TCG
+#include "accel/tcg/cpu-mmu-index.h"
+#include "exec/target_page.h"
+#endif
int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
diff --git a/target/arm/helper.c b/target/arm/helper.c
index bb445e3..0c1299f 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -12,32 +12,35 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/helper-proto.h"
#include "exec/page-protection.h"
+#include "exec/mmap-lock.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "exec/cputlb.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "hw/irq.h"
#include "system/cpu-timers.h"
+#include "exec/icount.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
+#include "accel/tcg/probe.h"
+#include "accel/tcg/getpc.h"
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"
#include "target/arm/gtimer.h"
-#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
static void switch_mode(CPUARMState *env, int mode);
-static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
+uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
assert(ri->fieldoffset);
if (cpreg_field_is_64bit(ri)) {
@@ -220,7 +223,7 @@ static void count_cpreg(gpointer key, gpointer opaque)
}
}
-static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
+static gint cpreg_key_compare(gconstpointer a, gconstpointer b, gpointer d)
{
uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);
@@ -244,7 +247,7 @@ void init_cpreg_list(ARMCPU *cpu)
int arraylen;
keys = g_hash_table_get_keys(cpu->cp_regs);
- keys = g_list_sort(keys, cpreg_key_compare);
+ keys = g_list_sort_with_data(keys, cpreg_key_compare, NULL);
cpu->cpreg_array_len = 0;
@@ -265,7 +268,7 @@ void init_cpreg_list(ARMCPU *cpu)
g_list_free(keys);
}
-static bool arm_pan_enabled(CPUARMState *env)
+bool arm_pan_enabled(CPUARMState *env)
{
if (is_a64(env)) {
if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) {
@@ -314,25 +317,6 @@ static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
return CP_ACCESS_UNDEFINED;
}
-/*
- * Check for traps to performance monitor registers, which are controlled
- * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
- */
-static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- int el = arm_current_el(env);
- uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
-
- if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
- return CP_ACCESS_TRAP_EL2;
- }
- if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
- return CP_ACCESS_TRAP_EL3;
- }
- return CP_ACCESS_OK;
-}
-
/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
@@ -676,283 +660,6 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
.resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};
-typedef struct pm_event {
- uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
- /* If the event is supported on this CPU (used to generate PMCEID[01]) */
- bool (*supported)(CPUARMState *);
- /*
- * Retrieve the current count of the underlying event. The programmed
- * counters hold a difference from the return value from this function
- */
- uint64_t (*get_count)(CPUARMState *);
- /*
- * Return how many nanoseconds it will take (at a minimum) for count events
- * to occur. A negative value indicates the counter will never overflow, or
- * that the counter has otherwise arranged for the overflow bit to be set
- * and the PMU interrupt to be raised on overflow.
- */
- int64_t (*ns_per_count)(uint64_t);
-} pm_event;
-
-static bool event_always_supported(CPUARMState *env)
-{
- return true;
-}
-
-static uint64_t swinc_get_count(CPUARMState *env)
-{
- /*
- * SW_INCR events are written directly to the pmevcntr's by writes to
- * PMSWINC, so there is no underlying count maintained by the PMU itself
- */
- return 0;
-}
-
-static int64_t swinc_ns_per(uint64_t ignored)
-{
- return -1;
-}
-
-/*
- * Return the underlying cycle count for the PMU cycle counters. If we're in
- * usermode, simply return 0.
- */
-static uint64_t cycles_get_count(CPUARMState *env)
-{
-#ifndef CONFIG_USER_ONLY
- return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
- ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
-#else
- return cpu_get_host_ticks();
-#endif
-}
-
-#ifndef CONFIG_USER_ONLY
-static int64_t cycles_ns_per(uint64_t cycles)
-{
- return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
-}
-
-static bool instructions_supported(CPUARMState *env)
-{
- /* Precise instruction counting */
- return icount_enabled() == ICOUNT_PRECISE;
-}
-
-static uint64_t instructions_get_count(CPUARMState *env)
-{
- assert(icount_enabled() == ICOUNT_PRECISE);
- return (uint64_t)icount_get_raw();
-}
-
-static int64_t instructions_ns_per(uint64_t icount)
-{
- assert(icount_enabled() == ICOUNT_PRECISE);
- return icount_to_ns((int64_t)icount);
-}
-#endif
-
-static bool pmuv3p1_events_supported(CPUARMState *env)
-{
- /* For events which are supported in any v8.1 PMU */
- return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
-}
-
-static bool pmuv3p4_events_supported(CPUARMState *env)
-{
-    /* For events which are supported in any v8.4 PMU */
- return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
-}
-
-static uint64_t zero_event_get_count(CPUARMState *env)
-{
- /* For events which on QEMU never fire, so their count is always zero */
- return 0;
-}
-
-static int64_t zero_event_ns_per(uint64_t cycles)
-{
- /* An event which never fires can never overflow */
- return -1;
-}
-
-static const pm_event pm_events[] = {
- { .number = 0x000, /* SW_INCR */
- .supported = event_always_supported,
- .get_count = swinc_get_count,
- .ns_per_count = swinc_ns_per,
- },
-#ifndef CONFIG_USER_ONLY
- { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
- .supported = instructions_supported,
- .get_count = instructions_get_count,
- .ns_per_count = instructions_ns_per,
- },
- { .number = 0x011, /* CPU_CYCLES, Cycle */
- .supported = event_always_supported,
- .get_count = cycles_get_count,
- .ns_per_count = cycles_ns_per,
- },
-#endif
- { .number = 0x023, /* STALL_FRONTEND */
- .supported = pmuv3p1_events_supported,
- .get_count = zero_event_get_count,
- .ns_per_count = zero_event_ns_per,
- },
- { .number = 0x024, /* STALL_BACKEND */
- .supported = pmuv3p1_events_supported,
- .get_count = zero_event_get_count,
- .ns_per_count = zero_event_ns_per,
- },
- { .number = 0x03c, /* STALL */
- .supported = pmuv3p4_events_supported,
- .get_count = zero_event_get_count,
- .ns_per_count = zero_event_ns_per,
- },
-};
-
-/*
- * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
- * events (i.e. the statistical profiling extension), this implementation
- * should first be updated to something sparse instead of the current
- * supported_event_map[] array.
- */
-#define MAX_EVENT_ID 0x3c
-#define UNSUPPORTED_EVENT UINT16_MAX
-static uint16_t supported_event_map[MAX_EVENT_ID + 1];
-
-/*
- * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
- * of ARM event numbers to indices in our pm_events array.
- *
- * Note: Events in the 0x40XX range are not currently supported.
- */
-void pmu_init(ARMCPU *cpu)
-{
- unsigned int i;
-
- /*
- * Empty supported_event_map and cpu->pmceid[01] before adding supported
- * events to them
- */
- for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
- supported_event_map[i] = UNSUPPORTED_EVENT;
- }
- cpu->pmceid0 = 0;
- cpu->pmceid1 = 0;
-
- for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
- const pm_event *cnt = &pm_events[i];
- assert(cnt->number <= MAX_EVENT_ID);
- /* We do not currently support events in the 0x40xx range */
- assert(cnt->number <= 0x3f);
-
- if (cnt->supported(&cpu->env)) {
- supported_event_map[cnt->number] = i;
- uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
- if (cnt->number & 0x20) {
- cpu->pmceid1 |= event_mask;
- } else {
- cpu->pmceid0 |= event_mask;
- }
- }
- }
-}
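
The pmceid0/pmceid1 split in the loop above is simply bit 5 of the event number selecting the register and bits [4:0] selecting the bit position. A standalone sketch:

#include <assert.h>
#include <stdint.h>

/* Which PMCEID register and bit advertise event number n (n <= 0x3f)? */
static void pmceid_slot(uint16_t n, int *reg, int *bit)
{
    *reg = (n & 0x20) ? 1 : 0;   /* events 0x20..0x3f live in PMCEID1 */
    *bit = n & 0x1f;
}

int main(void)
{
    int reg, bit;
    pmceid_slot(0x011, &reg, &bit);   /* CPU_CYCLES */
    assert(reg == 0 && bit == 0x11);
    pmceid_slot(0x023, &reg, &bit);   /* STALL_FRONTEND */
    assert(reg == 1 && bit == 0x03);
    return 0;
}
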
-
-/*
- * Check at runtime whether a PMU event is supported for the current machine
- */
-static bool event_supported(uint16_t number)
-{
- if (number > MAX_EVENT_ID) {
- return false;
- }
- return supported_event_map[number] != UNSUPPORTED_EVENT;
-}
-
-static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- /*
- * Performance monitor registers user accessibility is controlled
- * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
- * trapping to EL2 or EL3 for other accesses.
- */
- int el = arm_current_el(env);
- uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
-
- if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
- return CP_ACCESS_TRAP_EL1;
- }
- if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
- return CP_ACCESS_TRAP_EL2;
- }
- if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
- return CP_ACCESS_TRAP_EL3;
- }
-
- return CP_ACCESS_OK;
-}
-
-static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- /* ER: event counter read trap control */
- if (arm_feature(env, ARM_FEATURE_V8)
- && arm_current_el(env) == 0
- && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
- && isread) {
- return CP_ACCESS_OK;
- }
-
- return pmreg_access(env, ri, isread);
-}
-
-static CPAccessResult pmreg_access_swinc(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- /* SW: software increment write trap control */
- if (arm_feature(env, ARM_FEATURE_V8)
- && arm_current_el(env) == 0
- && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
- && !isread) {
- return CP_ACCESS_OK;
- }
-
- return pmreg_access(env, ri, isread);
-}
-
-static CPAccessResult pmreg_access_selr(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- /* ER: event counter read trap control */
- if (arm_feature(env, ARM_FEATURE_V8)
- && arm_current_el(env) == 0
- && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
- return CP_ACCESS_OK;
- }
-
- return pmreg_access(env, ri, isread);
-}
-
-static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- /* CR: cycle counter read trap control */
- if (arm_feature(env, ARM_FEATURE_V8)
- && arm_current_el(env) == 0
- && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
- && isread) {
- return CP_ACCESS_OK;
- }
-
- return pmreg_access(env, ri, isread);
-}
-
/*
* Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
* We use these to decide whether we need to wrap a write to MDCR_EL2
@@ -962,684 +669,6 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
(MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
-/*
- * Returns true if the counter (pass 31 for PMCCNTR) should count events using
- * the current EL, security state, and register configuration.
- */
-static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
-{
- uint64_t filter;
- bool e, p, u, nsk, nsu, nsh, m;
- bool enabled, prohibited = false, filtered;
- bool secure = arm_is_secure(env);
- int el = arm_current_el(env);
- uint64_t mdcr_el2;
- uint8_t hpmn;
-
- /*
- * We might be called for M-profile cores where MDCR_EL2 doesn't
- * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
- * must be before we read that value.
- */
- if (!arm_feature(env, ARM_FEATURE_PMU)) {
- return false;
- }
-
- mdcr_el2 = arm_mdcr_el2_eff(env);
- hpmn = mdcr_el2 & MDCR_HPMN;
-
- if (!arm_feature(env, ARM_FEATURE_EL2) ||
- (counter < hpmn || counter == 31)) {
- e = env->cp15.c9_pmcr & PMCRE;
- } else {
- e = mdcr_el2 & MDCR_HPME;
- }
- enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
-
- /* Is event counting prohibited? */
- if (el == 2 && (counter < hpmn || counter == 31)) {
- prohibited = mdcr_el2 & MDCR_HPMD;
- }
- if (secure) {
- prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
- }
-
- if (counter == 31) {
- /*
- * The cycle counter defaults to running. PMCR.DP says "disable
- * the cycle counter when event counting is prohibited".
- * Some MDCR bits disable the cycle counter specifically.
- */
- prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
- if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
- if (secure) {
- prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
- }
- if (el == 2) {
- prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
- }
- }
- }
-
- if (counter == 31) {
- filter = env->cp15.pmccfiltr_el0;
- } else {
- filter = env->cp15.c14_pmevtyper[counter];
- }
-
- p = filter & PMXEVTYPER_P;
- u = filter & PMXEVTYPER_U;
- nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
- nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
- nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
- m = arm_el_is_aa64(env, 1) &&
- arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
-
- if (el == 0) {
- filtered = secure ? u : u != nsu;
- } else if (el == 1) {
- filtered = secure ? p : p != nsk;
- } else if (el == 2) {
- filtered = !nsh;
- } else { /* EL3 */
- filtered = m != p;
- }
-
- if (counter != 31) {
- /*
- * If not checking PMCCNTR, ensure the counter is setup to an event we
- * support
- */
- uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
- if (!event_supported(event)) {
- return false;
- }
- }
-
- return enabled && !prohibited && !filtered;
-}
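
The densest part of the function above is the filter evaluation; as a sketch, my reading of the EL0 case only (an illustration of the code, not a restatement of the Arm ARM): in Secure state the U bit alone suppresses EL0 counting, while in Non-secure state counting is suppressed when U and NSU differ:

/* EL0 event filtering as implemented above (sketch, EL0 case only). */
static bool el0_filtered(bool secure, bool u, bool nsu)
{
    return secure ? u : (u != nsu);
}
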
-
-static void pmu_update_irq(CPUARMState *env)
-{
- ARMCPU *cpu = env_archcpu(env);
- qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
- (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
-}
-
-static bool pmccntr_clockdiv_enabled(CPUARMState *env)
-{
- /*
- * Return true if the clock divider is enabled and the cycle counter
- * is supposed to tick only once every 64 clock cycles. This is
- * controlled by PMCR.D, but if PMCR.LC is set to enable the long
- * (64-bit) cycle counter PMCR.D has no effect.
- */
- return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
-}
-
-static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
-{
- /* Return true if the specified event counter is configured to be 64 bit */
-
- /* This isn't intended to be used with the cycle counter */
- assert(counter < 31);
-
- if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
- return false;
- }
-
- if (arm_feature(env, ARM_FEATURE_EL2)) {
- /*
- * MDCR_EL2.HLP still applies even when EL2 is disabled in the
- * current security state, so we don't use arm_mdcr_el2_eff() here.
- */
- bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
- int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
-
- if (counter >= hpmn) {
- return hlp;
- }
- }
- return env->cp15.c9_pmcr & PMCRLP;
-}
-
-/*
- * Ensure c15_ccnt is the guest-visible count so that operations such as
- * enabling/disabling the counter or filtering, modifying the count itself,
- * etc. can be done logically. This is essentially a no-op if the counter is
- * not enabled at the time of the call.
- */
-static void pmccntr_op_start(CPUARMState *env)
-{
- uint64_t cycles = cycles_get_count(env);
-
- if (pmu_counter_enabled(env, 31)) {
- uint64_t eff_cycles = cycles;
- if (pmccntr_clockdiv_enabled(env)) {
- eff_cycles /= 64;
- }
-
- uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
-
- uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
- 1ull << 63 : 1ull << 31;
- if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
- env->cp15.c9_pmovsr |= (1ULL << 31);
- pmu_update_irq(env);
- }
-
- env->cp15.c15_ccnt = new_pmccntr;
- }
- env->cp15.c15_ccnt_delta = cycles;
-}
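
pmccntr_op_start() and pmccntr_op_finish() implement a general delta-counter technique: the stored register is only meaningful between start and finish, and outside that window the state is kept as clock - guest_count. A minimal standalone model of just that invariant (it deliberately ignores the enable checks, clock divider, and overflow handling seen above):

#include <assert.h>
#include <stdint.h>

static uint64_t fake_clock;   /* stands in for cycles_get_count() */

typedef struct { uint64_t value, delta; } Counter;

/* While "open", value is the guest-visible count: value = clock - delta. */
static void op_start(Counter *c)  { c->value = fake_clock - c->delta; }
/* On close, fold the (possibly modified) value back into the delta. */
static void op_finish(Counter *c) { c->delta = fake_clock - c->value; }

int main(void)
{
    Counter c = { 0, 0 };
    fake_clock = 100;
    op_start(&c);
    c.value = 7;            /* guest writes the counter */
    op_finish(&c);
    fake_clock = 150;
    op_start(&c);
    assert(c.value == 57);  /* 7 + 50 elapsed ticks */
    return 0;
}
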
-
-/*
- * If PMCCNTR is enabled, recalculate the delta between the clock and the
- * guest-visible count. A call to pmccntr_op_finish should follow every call to
- * pmccntr_op_start.
- */
-static void pmccntr_op_finish(CPUARMState *env)
-{
- if (pmu_counter_enabled(env, 31)) {
-#ifndef CONFIG_USER_ONLY
- /* Calculate when the counter will next overflow */
- uint64_t remaining_cycles = -env->cp15.c15_ccnt;
- if (!(env->cp15.c9_pmcr & PMCRLC)) {
- remaining_cycles = (uint32_t)remaining_cycles;
- }
- int64_t overflow_in = cycles_ns_per(remaining_cycles);
-
- if (overflow_in > 0) {
- int64_t overflow_at;
-
- if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
- overflow_in, &overflow_at)) {
- ARMCPU *cpu = env_archcpu(env);
- timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
- }
- }
-#endif
-
- uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
- if (pmccntr_clockdiv_enabled(env)) {
- prev_cycles /= 64;
- }
- env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
- }
-}
-
-static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
-{
- uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
- uint64_t count = 0;
- if (event_supported(event)) {
- uint16_t event_idx = supported_event_map[event];
- count = pm_events[event_idx].get_count(env);
- }
-
- if (pmu_counter_enabled(env, counter)) {
- uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
- uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
- 1ULL << 63 : 1ULL << 31;
-
- if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
- env->cp15.c9_pmovsr |= (1 << counter);
- pmu_update_irq(env);
- }
- env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
- }
- env->cp15.c14_pmevcntr_delta[counter] = count;
-}
-
-static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
-{
- if (pmu_counter_enabled(env, counter)) {
-#ifndef CONFIG_USER_ONLY
- uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
- uint16_t event_idx = supported_event_map[event];
- uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
- int64_t overflow_in;
-
- if (!pmevcntr_is_64_bit(env, counter)) {
- delta = (uint32_t)delta;
- }
- overflow_in = pm_events[event_idx].ns_per_count(delta);
-
- if (overflow_in > 0) {
- int64_t overflow_at;
-
- if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
- overflow_in, &overflow_at)) {
- ARMCPU *cpu = env_archcpu(env);
- timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
- }
- }
-#endif
-
- env->cp15.c14_pmevcntr_delta[counter] -=
- env->cp15.c14_pmevcntr[counter];
- }
-}
-
-void pmu_op_start(CPUARMState *env)
-{
- unsigned int i;
- pmccntr_op_start(env);
- for (i = 0; i < pmu_num_counters(env); i++) {
- pmevcntr_op_start(env, i);
- }
-}
-
-void pmu_op_finish(CPUARMState *env)
-{
- unsigned int i;
- pmccntr_op_finish(env);
- for (i = 0; i < pmu_num_counters(env); i++) {
- pmevcntr_op_finish(env, i);
- }
-}
-
-void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
-{
- pmu_op_start(&cpu->env);
-}
-
-void pmu_post_el_change(ARMCPU *cpu, void *ignored)
-{
- pmu_op_finish(&cpu->env);
-}
-
-void arm_pmu_timer_cb(void *opaque)
-{
- ARMCPU *cpu = opaque;
-
- /*
- * Update all the counter values based on the current underlying counts,
- * triggering interrupts to be raised, if necessary. pmu_op_finish() also
- * has the effect of setting the cpu->pmu_timer to the next earliest time a
- * counter may expire.
- */
- pmu_op_start(&cpu->env);
- pmu_op_finish(&cpu->env);
-}
-
-static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmu_op_start(env);
-
- if (value & PMCRC) {
- /* The counter has been reset */
- env->cp15.c15_ccnt = 0;
- }
-
- if (value & PMCRP) {
- unsigned int i;
- for (i = 0; i < pmu_num_counters(env); i++) {
- env->cp15.c14_pmevcntr[i] = 0;
- }
- }
-
- env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
- env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);
-
- pmu_op_finish(env);
-}
-
-static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint64_t pmcr = env->cp15.c9_pmcr;
-
- /*
- * If EL2 is implemented and enabled for the current security state, reads
- * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
- */
- if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
- pmcr &= ~PMCRN_MASK;
- pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
- }
-
- return pmcr;
-}
-
-static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- unsigned int i;
- uint64_t overflow_mask, new_pmswinc;
-
- for (i = 0; i < pmu_num_counters(env); i++) {
- /* Increment a counter's count iff: */
- if ((value & (1 << i)) && /* counter's bit is set */
- /* counter is enabled and not filtered */
- pmu_counter_enabled(env, i) &&
- /* counter is SW_INCR */
- (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
- pmevcntr_op_start(env, i);
-
- /*
- * Detect if this write causes an overflow since we can't predict
- * PMSWINC overflows like we can for other events
- */
- new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
-
- overflow_mask = pmevcntr_is_64_bit(env, i) ?
- 1ULL << 63 : 1ULL << 31;
-
- if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
- env->cp15.c9_pmovsr |= (1 << i);
- pmu_update_irq(env);
- }
-
- env->cp15.c14_pmevcntr[i] = new_pmswinc;
-
- pmevcntr_op_finish(env, i);
- }
- }
-}
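
The old & ~new & overflow_mask test used here (and in pmccntr_op_start/pmevcntr_op_start) is the usual carry-out check for an increment: with a +1 step the overflow bit can only go from 1 to 0 on a wrap. A standalone sketch:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Did incrementing old to newval carry out of the counter width? */
static bool inc_overflowed(uint64_t old, uint64_t newval, uint64_t mask)
{
    return (old & ~newval & mask) != 0;
}

int main(void)
{
    uint64_t bit31 = 1ull << 31;
    assert(inc_overflowed(0xffffffffull, 0x100000000ull, bit31));
    assert(!inc_overflowed(0x7fffffffull, 0x80000000ull, bit31));
    return 0;
}
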
-
-static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint64_t ret;
- pmccntr_op_start(env);
- ret = env->cp15.c15_ccnt;
- pmccntr_op_finish(env);
- return ret;
-}
-
-static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /*
- * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
- * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
- * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
- * accessed.
- */
- env->cp15.c9_pmselr = value & 0x1f;
-}
-
-static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmccntr_op_start(env);
- env->cp15.c15_ccnt = value;
- pmccntr_op_finish(env);
-}
-
-static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint64_t cur_val = pmccntr_read(env, NULL);
-
- pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
-}
-
-static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmccntr_op_start(env);
- env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
- pmccntr_op_finish(env);
-}
-
-static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmccntr_op_start(env);
- /* M is not accessible from AArch32 */
- env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
- (value & PMCCFILTR);
- pmccntr_op_finish(env);
-}
-
-static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- /* M is not visible in AArch32 */
- return env->cp15.pmccfiltr_el0 & PMCCFILTR;
-}
-
-static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmu_op_start(env);
- value &= pmu_counter_mask(env);
- env->cp15.c9_pmcnten |= value;
- pmu_op_finish(env);
-}
-
-static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmu_op_start(env);
- value &= pmu_counter_mask(env);
- env->cp15.c9_pmcnten &= ~value;
- pmu_op_finish(env);
-}
-
-static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- value &= pmu_counter_mask(env);
- env->cp15.c9_pmovsr &= ~value;
- pmu_update_irq(env);
-}
-
-static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- value &= pmu_counter_mask(env);
- env->cp15.c9_pmovsr |= value;
- pmu_update_irq(env);
-}
-
-static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value, const uint8_t counter)
-{
- if (counter == 31) {
- pmccfiltr_write(env, ri, value);
- } else if (counter < pmu_num_counters(env)) {
- pmevcntr_op_start(env, counter);
-
- /*
- * If this counter's event type is changing, store the current
- * underlying count for the new type in c14_pmevcntr_delta[counter] so
- * pmevcntr_op_finish has the correct baseline when it converts back to
- * a delta.
- */
- uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
- PMXEVTYPER_EVTCOUNT;
- uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
- if (old_event != new_event) {
- uint64_t count = 0;
- if (event_supported(new_event)) {
- uint16_t event_idx = supported_event_map[new_event];
- count = pm_events[event_idx].get_count(env);
- }
- env->cp15.c14_pmevcntr_delta[counter] = count;
- }
-
- env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
- pmevcntr_op_finish(env, counter);
- }
- /*
- * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
- * PMSELR value is equal to or greater than the number of implemented
- * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
- */
-}
-
-static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
- const uint8_t counter)
-{
- if (counter == 31) {
- return env->cp15.pmccfiltr_el0;
- } else if (counter < pmu_num_counters(env)) {
- return env->cp15.c14_pmevtyper[counter];
- } else {
- /*
- * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
- * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
- */
- return 0;
- }
-}
-
-static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- pmevtyper_write(env, ri, value, counter);
-}
-
-static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- env->cp15.c14_pmevtyper[counter] = value;
-
- /*
- * pmevtyper_rawwrite is called between a pair of pmu_op_start and
- * pmu_op_finish calls when loading saved state for a migration. Because
- * we're potentially updating the type of event here, the value written to
- * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
- * different counter type. Therefore, we need to set this value to the
- * current count for the counter type we're writing so that pmu_op_finish
- * has the correct count for its calculation.
- */
- uint16_t event = value & PMXEVTYPER_EVTCOUNT;
- if (event_supported(event)) {
- uint16_t event_idx = supported_event_map[event];
- env->cp15.c14_pmevcntr_delta[counter] =
- pm_events[event_idx].get_count(env);
- }
-}
-
-static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- return pmevtyper_read(env, ri, counter);
-}
-
-static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
-}
-
-static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
-}
-
-static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value, uint8_t counter)
-{
- if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
- /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
- value &= MAKE_64BIT_MASK(0, 32);
- }
- if (counter < pmu_num_counters(env)) {
- pmevcntr_op_start(env, counter);
- env->cp15.c14_pmevcntr[counter] = value;
- pmevcntr_op_finish(env, counter);
- }
- /*
- * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
- * are CONSTRAINED UNPREDICTABLE.
- */
-}
-
-static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
- uint8_t counter)
-{
- if (counter < pmu_num_counters(env)) {
- uint64_t ret;
- pmevcntr_op_start(env, counter);
- ret = env->cp15.c14_pmevcntr[counter];
- pmevcntr_op_finish(env, counter);
- if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
- /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
- ret &= MAKE_64BIT_MASK(0, 32);
- }
- return ret;
- } else {
- /*
- * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
- * are CONSTRAINED UNPREDICTABLE.
- */
- return 0;
- }
-}
-
-static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- pmevcntr_write(env, ri, value, counter);
-}
-
-static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- return pmevcntr_read(env, ri, counter);
-}
-
-static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- assert(counter < pmu_num_counters(env));
- env->cp15.c14_pmevcntr[counter] = value;
- pmevcntr_write(env, ri, value, counter);
-}
-
-static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- assert(counter < pmu_num_counters(env));
- return env->cp15.c14_pmevcntr[counter];
-}
-
-static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
-}
-
-static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
-}
-
-static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- if (arm_feature(env, ARM_FEATURE_V8)) {
- env->cp15.c9_pmuserenr = value & 0xf;
- } else {
- env->cp15.c9_pmuserenr = value & 1;
- }
-}
-
-static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
-    /* Only interrupt-enable bits allowed by pmu_counter_mask() can change */
- value &= pmu_counter_mask(env);
- env->cp15.c9_pminten |= value;
- pmu_update_irq(env);
-}
-
-static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- value &= pmu_counter_mask(env);
- env->cp15.c9_pminten &= ~value;
- pmu_update_irq(env);
-}
-
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -1869,171 +898,6 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
/* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
{ .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
.access = PL1_W, .type = ARM_CP_NOP },
- /*
- * Performance monitors are implementation defined in v7,
- * but with an ARM recommended set of registers, which we
- * follow.
- *
- * Performance registers fall into three categories:
- * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
- * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
- * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
- * For the cases controlled by PMUSERENR we must set .access to PL0_RW
- * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
- */
- { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
- .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
- .writefn = pmcntenset_write,
- .accessfn = pmreg_access,
- .fgt = FGT_PMCNTEN,
- .raw_writefn = raw_write },
- { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCNTEN,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
- .writefn = pmcntenset_write, .raw_writefn = raw_write },
- { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
- .access = PL0_RW,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
- .accessfn = pmreg_access,
- .fgt = FGT_PMCNTEN,
- .writefn = pmcntenclr_write,
- .type = ARM_CP_ALIAS | ARM_CP_IO },
- { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCNTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
- .writefn = pmcntenclr_write },
- { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
- .access = PL0_RW, .type = ARM_CP_IO,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
- .accessfn = pmreg_access,
- .fgt = FGT_PMOVS,
- .writefn = pmovsr_write,
- .raw_writefn = raw_write },
- { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMOVS,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
- .writefn = pmovsr_write,
- .raw_writefn = raw_write },
- { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
- .access = PL0_W, .accessfn = pmreg_access_swinc,
- .fgt = FGT_PMSWINC_EL0,
- .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .writefn = pmswinc_write },
- { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
- .access = PL0_W, .accessfn = pmreg_access_swinc,
- .fgt = FGT_PMSWINC_EL0,
- .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .writefn = pmswinc_write },
- { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
- .access = PL0_RW, .type = ARM_CP_ALIAS,
- .fgt = FGT_PMSELR_EL0,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
- .accessfn = pmreg_access_selr, .writefn = pmselr_write,
- .raw_writefn = raw_write},
- { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
- .access = PL0_RW, .accessfn = pmreg_access_selr,
- .fgt = FGT_PMSELR_EL0,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
- .writefn = pmselr_write, .raw_writefn = raw_write, },
- { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
- .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fgt = FGT_PMCCNTR_EL0,
- .readfn = pmccntr_read, .writefn = pmccntr_write32,
- .accessfn = pmreg_access_ccntr },
- { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
- .access = PL0_RW, .accessfn = pmreg_access_ccntr,
- .fgt = FGT_PMCCNTR_EL0,
- .type = ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
- .readfn = pmccntr_read, .writefn = pmccntr_write,
- .raw_readfn = raw_read, .raw_writefn = raw_write, },
- { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
- .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCCFILTR_EL0,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .resetvalue = 0, },
- { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
- .writefn = pmccfiltr_write, .raw_writefn = raw_write,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCCFILTR_EL0,
- .type = ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
- .resetvalue = 0, },
- { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
- .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = pmreg_access,
- .fgt = FGT_PMEVTYPERN_EL0,
- .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
- { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
- .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = pmreg_access,
- .fgt = FGT_PMEVTYPERN_EL0,
- .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
- { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
- .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = pmreg_access_xevcntr,
- .fgt = FGT_PMEVCNTRN_EL0,
- .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
- { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
- .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = pmreg_access_xevcntr,
- .fgt = FGT_PMEVCNTRN_EL0,
- .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
- { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
- .access = PL0_R | PL1_RW, .accessfn = access_tpm,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
- .resetvalue = 0,
- .writefn = pmuserenr_write, .raw_writefn = raw_write },
- { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
- .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
- .resetvalue = 0,
- .writefn = pmuserenr_write, .raw_writefn = raw_write },
- { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
- .access = PL1_RW, .accessfn = access_tpm,
- .fgt = FGT_PMINTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
- .resetvalue = 0,
- .writefn = pmintenset_write, .raw_writefn = raw_write },
- { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
- .access = PL1_RW, .accessfn = access_tpm,
- .fgt = FGT_PMINTEN,
- .type = ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
- .writefn = pmintenset_write, .raw_writefn = raw_write,
- .resetvalue = 0x0 },
- { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
- .access = PL1_RW, .accessfn = access_tpm,
- .fgt = FGT_PMINTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
- .writefn = pmintenclr_write, },
- { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
- .access = PL1_RW, .accessfn = access_tpm,
- .fgt = FGT_PMINTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
- .writefn = pmintenclr_write },
{ .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
.access = PL1_R,
@@ -2116,25 +980,6 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
};
-static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
- /* PMOVSSET is not implemented in v7 before v7ve */
- { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMOVS,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
- .writefn = pmovsset_write,
- .raw_writefn = raw_write },
- { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMOVS,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
- .writefn = pmovsset_write,
- .raw_writefn = raw_write },
-};
-
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -3443,402 +2288,6 @@ static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
}
}
-#ifndef CONFIG_USER_ONLY
-/* get_phys_addr() isn't present for user-mode-only targets */
-
-static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- if (ri->opc2 & 4) {
- /*
- * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
- * Secure EL1 (which can only happen if EL3 is AArch64).
- * They are simply UNDEF if executed from NS EL1.
- * They function normally from EL2 or EL3.
- */
- if (arm_current_el(env) == 1) {
- if (arm_is_secure_below_el3(env)) {
- if (env->cp15.scr_el3 & SCR_EEL2) {
- return CP_ACCESS_TRAP_EL2;
- }
- return CP_ACCESS_TRAP_EL3;
- }
- return CP_ACCESS_UNDEFINED;
- }
- }
- return CP_ACCESS_OK;
-}
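
A minimal standalone sketch of the routing decision above, assuming illustrative enum and parameter names (this is not QEMU API):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { ATS_OK, ATS_TRAP_EL2, ATS_TRAP_EL3, ATS_UNDEF } AtsResult;

    /* Mirror of the ATS12NSO* routing above: only EL1 is special-cased. */
    static AtsResult ats12nso_route(int current_el, bool secure_el1, bool scr_eel2)
    {
        if (current_el != 1) {
            return ATS_OK;                    /* EL2/EL3: functions normally */
        }
        if (secure_el1) {
            return scr_eel2 ? ATS_TRAP_EL2 : ATS_TRAP_EL3;
        }
        return ATS_UNDEF;                     /* NS EL1: UNDEF */
    }

    int main(void)
    {
        printf("%d\n", ats12nso_route(1, true, false));  /* -> ATS_TRAP_EL3 */
        return 0;
    }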
-
-#ifdef CONFIG_TCG
-static int par_el1_shareability(GetPhysAddrResult *res)
-{
- /*
- * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
- * memory -- see pseudocode PAREncodeShareability().
- */
- if (((res->cacheattrs.attrs & 0xf0) == 0) ||
- res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
- return 2;
- }
- return res->cacheattrs.shareability;
-}
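
For a quick sanity check of the attribute encodings tested above -- top nibble clear means Device, 0x44/0x40 are the Normal Non-cacheable forms -- a hedged standalone mirror with an invented function name:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* True when PAR_EL1.SH must read as 0b10 for this attribute byte. */
    static bool par_sh_forced(uint8_t attrs)
    {
        return (attrs & 0xf0) == 0 || attrs == 0x44 || attrs == 0x40;
    }

    int main(void)
    {
        printf("0x04 (Device):    %d\n", par_sh_forced(0x04));  /* 1 */
        printf("0x44 (Normal-NC): %d\n", par_sh_forced(0x44));  /* 1 */
        printf("0xff (Normal-WB): %d\n", par_sh_forced(0xff));  /* 0 */
        return 0;
    }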
-
-static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- ARMSecuritySpace ss)
-{
- bool ret;
- uint64_t par64;
- bool format64 = false;
- ARMMMUFaultInfo fi = {};
- GetPhysAddrResult res = {};
-
- /*
- * I_MXTJT: Granule protection checks are not performed on the final
- * address of a successful translation. This is a translation not a
- * memory reference, so "memop = none = 0".
- */
- ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
- mmu_idx, ss, &res, &fi);
-
- /*
- * ATS operations only do S1 or S1+S2 translations, so we never
- * have to deal with the ARMCacheAttrs format for S2 only.
- */
- assert(!res.cacheattrs.is_s2_format);
-
- if (ret) {
- /*
- * Some kinds of translation fault must cause exceptions rather
- * than being reported in the PAR.
- */
- int current_el = arm_current_el(env);
- int target_el;
- uint32_t syn, fsr, fsc;
- bool take_exc = false;
-
- if (fi.s1ptw && current_el == 1
- && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
- /*
- * Synchronous stage 2 fault on an access made as part of the
- * translation table walk for AT S1E0* or AT S1E1* insn
- * executed from NS EL1. If this is a synchronous external abort
- * and SCR_EL3.EA == 1, then we take a synchronous external abort
- * to EL3. Otherwise the fault is taken as an exception to EL2,
- * and HPFAR_EL2 holds the faulting IPA.
- */
- if (fi.type == ARMFault_SyncExternalOnWalk &&
- (env->cp15.scr_el3 & SCR_EA)) {
- target_el = 3;
- } else {
- env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
- if (arm_is_secure_below_el3(env) && fi.s1ns) {
- env->cp15.hpfar_el2 |= HPFAR_NS;
- }
- target_el = 2;
- }
- take_exc = true;
- } else if (fi.type == ARMFault_SyncExternalOnWalk) {
- /*
- * Synchronous external aborts during a translation table walk
- * are taken as Data Abort exceptions.
- */
- if (fi.stage2) {
- if (current_el == 3) {
- target_el = 3;
- } else {
- target_el = 2;
- }
- } else {
- target_el = exception_target_el(env);
- }
- take_exc = true;
- }
-
- if (take_exc) {
- /* Construct FSR and FSC using same logic as arm_deliver_fault() */
- if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
- arm_s1_regime_using_lpae_format(env, mmu_idx)) {
- fsr = arm_fi_to_lfsc(&fi);
- fsc = extract32(fsr, 0, 6);
- } else {
- fsr = arm_fi_to_sfsc(&fi);
- fsc = 0x3f;
- }
- /*
- * Report exception with ESR indicating a fault due to a
- * translation table walk for a cache maintenance instruction.
- */
- syn = syn_data_abort_no_iss(current_el == target_el, 0,
- fi.ea, 1, fi.s1ptw, 1, fsc);
- env->exception.vaddress = value;
- env->exception.fsr = fsr;
- raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
- }
- }
-
- if (is_a64(env)) {
- format64 = true;
- } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
- /*
- * ATS1Cxx:
- * * TTBCR.EAE determines whether the result is returned using the
- * 32-bit or the 64-bit PAR format
- * * Instructions executed in Hyp mode always use the 64bit format
- *
- * ATS1S2NSOxx uses the 64bit format if any of the following is true:
- * * The Non-secure TTBCR.EAE bit is set to 1
- * * The implementation includes EL2, and the value of HCR.VM is 1
- *
- * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
- *
- * ATS1Hx always uses the 64bit format.
- */
- format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
-
- if (arm_feature(env, ARM_FEATURE_EL2)) {
- if (mmu_idx == ARMMMUIdx_E10_0 ||
- mmu_idx == ARMMMUIdx_E10_1 ||
- mmu_idx == ARMMMUIdx_E10_1_PAN) {
- format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
- } else {
- format64 |= arm_current_el(env) == 2;
- }
- }
- }
-
- if (format64) {
- /* Create a 64-bit PAR */
- par64 = (1 << 11); /* LPAE bit always set */
- if (!ret) {
- par64 |= res.f.phys_addr & ~0xfffULL;
- if (!res.f.attrs.secure) {
- par64 |= (1 << 9); /* NS */
- }
- par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
- par64 |= par_el1_shareability(&res) << 7; /* SH */
- } else {
- uint32_t fsr = arm_fi_to_lfsc(&fi);
-
- par64 |= 1; /* F */
- par64 |= (fsr & 0x3f) << 1; /* FS */
- if (fi.stage2) {
- par64 |= (1 << 9); /* S */
- }
- if (fi.s1ptw) {
- par64 |= (1 << 8); /* PTW */
- }
- }
- } else {
- /*
- * fsr is a DFSR/IFSR value for the short descriptor
- * translation table format (with WnR always clear).
- * Convert it to a 32-bit PAR.
- */
- if (!ret) {
- /* We do not set any attribute bits in the PAR */
- if (res.f.lg_page_size == 24
- && arm_feature(env, ARM_FEATURE_V7)) {
- par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
- } else {
- par64 = res.f.phys_addr & 0xfffff000;
- }
- if (!res.f.attrs.secure) {
- par64 |= (1 << 9); /* NS */
- }
- } else {
- uint32_t fsr = arm_fi_to_sfsc(&fi);
-
- par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
- ((fsr & 0xf) << 1) | 1;
- }
- }
- return par64;
-}
-#endif /* CONFIG_TCG */
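
A minimal sketch of the successful-translation 64-bit PAR packing in do_ats_write() above; field positions are taken from the code itself, and the function name is invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t pack_par64_success(uint64_t phys, int nonsecure,
                                       unsigned sh, uint8_t attrs)
    {
        uint64_t par = 1ULL << 11;              /* LPAE-format bit, always set */
        par |= phys & ~0xfffULL;                /* output address, 4K-aligned */
        par |= (uint64_t)(nonsecure & 1) << 9;  /* NS */
        par |= (uint64_t)(sh & 3) << 7;         /* SH, see par_el1_shareability() */
        par |= (uint64_t)attrs << 56;           /* ATTR */
        return par;                             /* F (bit 0) left clear */
    }

    int main(void)
    {
        printf("PAR = 0x%llx\n",
               (unsigned long long)pack_par64_success(0x40000000ULL, 1, 2, 0xff));
        return 0;
    }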
-
-static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
-{
-#ifdef CONFIG_TCG
- MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
- uint64_t par64;
- ARMMMUIdx mmu_idx;
- int el = arm_current_el(env);
- ARMSecuritySpace ss = arm_security_space(env);
-
- switch (ri->opc2 & 6) {
- case 0:
- /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
- switch (el) {
- case 3:
- if (ri->crm == 9 && arm_pan_enabled(env)) {
- mmu_idx = ARMMMUIdx_E30_3_PAN;
- } else {
- mmu_idx = ARMMMUIdx_E3;
- }
- break;
- case 2:
- g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
- /* fall through */
- case 1:
- if (ri->crm == 9 && arm_pan_enabled(env)) {
- mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
- } else {
- mmu_idx = ARMMMUIdx_Stage1_E1;
- }
- break;
- default:
- g_assert_not_reached();
- }
- break;
- case 2:
- /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
- switch (el) {
- case 3:
- mmu_idx = ARMMMUIdx_E30_0;
- break;
- case 2:
- g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
- mmu_idx = ARMMMUIdx_Stage1_E0;
- break;
- case 1:
- mmu_idx = ARMMMUIdx_Stage1_E0;
- break;
- default:
- g_assert_not_reached();
- }
- break;
- case 4:
- /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
- mmu_idx = ARMMMUIdx_E10_1;
- ss = ARMSS_NonSecure;
- break;
- case 6:
- /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
- mmu_idx = ARMMMUIdx_E10_0;
- ss = ARMSS_NonSecure;
- break;
- default:
- g_assert_not_reached();
- }
-
- par64 = do_ats_write(env, value, access_type, mmu_idx, ss);
-
- A32_BANKED_CURRENT_REG_SET(env, par, par64);
-#else
- /* Handled by hardware accelerator. */
- g_assert_not_reached();
-#endif /* CONFIG_TCG */
-}
-
-static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
-#ifdef CONFIG_TCG
- MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
- uint64_t par64;
-
- /* There is no SecureEL2 for AArch32. */
- par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
- ARMSS_NonSecure);
-
- A32_BANKED_CURRENT_REG_SET(env, par, par64);
-#else
- /* Handled by hardware accelerator. */
- g_assert_not_reached();
-#endif /* CONFIG_TCG */
-}
-
-static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- /*
- * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level
- * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can
- * only happen when executing at EL3 because that combination also causes an
- * illegal exception return. We don't need to check FEAT_RME either, because
- * scr_write() ensures that the NSE bit is not set otherwise.
- */
- if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
- return CP_ACCESS_UNDEFINED;
- }
- return CP_ACCESS_OK;
-}
-
-static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- if (arm_current_el(env) == 3 &&
- !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
- return CP_ACCESS_UNDEFINED;
- }
- return at_e012_access(env, ri, isread);
-}
-
-static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) {
- return CP_ACCESS_TRAP_EL2;
- }
- return at_e012_access(env, ri, isread);
-}
-
-static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
-#ifdef CONFIG_TCG
- MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
- ARMMMUIdx mmu_idx;
- uint64_t hcr_el2 = arm_hcr_el2_eff(env);
- bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
- bool for_el3 = false;
- ARMSecuritySpace ss;
-
- switch (ri->opc2 & 6) {
- case 0:
- switch (ri->opc1) {
- case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
- if (ri->crm == 9 && arm_pan_enabled(env)) {
- mmu_idx = regime_e20 ?
- ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
- } else {
- mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
- }
- break;
- case 4: /* AT S1E2R, AT S1E2W */
- mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
- break;
- case 6: /* AT S1E3R, AT S1E3W */
- mmu_idx = ARMMMUIdx_E3;
- for_el3 = true;
- break;
- default:
- g_assert_not_reached();
- }
- break;
- case 2: /* AT S1E0R, AT S1E0W */
- mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
- break;
- case 4: /* AT S12E1R, AT S12E1W */
- mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
- break;
- case 6: /* AT S12E0R, AT S12E0W */
- mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
- break;
- default:
- g_assert_not_reached();
- }
-
- ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env);
- env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss);
-#else
- /* Handled by hardware accelerator. */
- g_assert_not_reached();
-#endif /* CONFIG_TCG */
-}
-#endif
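
A hedged decode sketch of the opc1/opc2 switch in ats_write64() above (the registration itself moves behind define_at_insn_regs(), which this patch calls from register_cp_regs_for_features()); names here are illustrative:

    #include <stdio.h>

    /* Print which AT instruction a given {opc1, opc2} pair selects.
     * (The crm == 9 PAN variants S1E1RP/S1E1WP also land in case 0.) */
    static void describe_at64(int opc1, int opc2)
    {
        const char *rw = (opc2 & 1) ? "W" : "R";

        switch (opc2 & 6) {
        case 0:   /* S1E1*, S1E2*, S1E3* selected by opc1 */
            printf("AT S1E%d%s\n", opc1 == 0 ? 1 : opc1 == 4 ? 2 : 3, rw);
            break;
        case 2:
            printf("AT S1E0%s\n", rw);
            break;
        case 4:
            printf("AT S12E1%s\n", rw);
            break;
        case 6:
            printf("AT S12E0%s\n", rw);
            break;
        }
    }

    int main(void)
    {
        describe_at64(0, 0);   /* AT S1E1R */
        describe_at64(4, 1);   /* AT S1E2W */
        describe_at64(0, 4);   /* AT S12E1R */
        return 0;
    }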
-
/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
@@ -4985,7 +3434,7 @@ static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
mmap_lock();
- tb_invalidate_phys_range(start_address, end_address);
+ tb_invalidate_phys_range(env_cpu(env), start_address, end_address);
mmap_unlock();
}
@@ -5089,53 +3538,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
.fgt = FGT_DCCISW,
.access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
-#ifndef CONFIG_USER_ONLY
- /* 64 bit address translation operations */
- { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E1R,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E1W,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E0R,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E0W,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .accessfn = at_e012_access, .writefn = ats_write64 },
- { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .accessfn = at_e012_access, .writefn = ats_write64 },
- { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .accessfn = at_e012_access, .writefn = ats_write64 },
- { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .accessfn = at_e012_access, .writefn = ats_write64 },
- /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
- { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .writefn = ats_write64 },
- { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .writefn = ats_write64 },
{ .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
@@ -5143,7 +3545,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.fgt = FGT_PAR_EL1,
.fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
.writefn = par_write },
-#endif
/* 32 bit cache operations */
{ .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
.type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
@@ -5746,33 +4147,6 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
.access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
#ifndef CONFIG_USER_ONLY
- /*
- * Unlike the other EL2-related AT operations, these must
- * UNDEF from EL3 if EL2 is not implemented, which is why we
- * define them here rather than with the rest of the AT ops.
- */
- { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL2_W, .accessfn = at_s1e2_access,
- .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = ats_write64 },
- { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL2_W, .accessfn = at_s1e2_access,
- .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = ats_write64 },
- /*
- * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
- * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
- * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
- * to behave as if SCR.NS was 1.
- */
- { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL2_W,
- .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
- { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL2_W,
- .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
{ .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
/*
@@ -6584,7 +4958,6 @@ static const ARMCPRegInfo zcr_reginfo[] = {
.writefn = zcr_write, .raw_writefn = raw_write },
};
-#ifdef TARGET_AARCH64
static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -6659,7 +5032,7 @@ void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
* when disabled either.
*/
if (change & new & R_SVCR_ZA_MASK) {
- memset(env->zarray, 0, sizeof(env->zarray));
+ memset(&env->za_state, 0, sizeof(env->za_state));
}
if (tcg_enabled()) {
@@ -6678,10 +5051,14 @@ static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
int cur_el = arm_current_el(env);
int old_len = sve_vqm1_for_el(env, cur_el);
+ uint64_t valid_mask = R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
int new_len;
QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
- value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
+ if (cpu_isar_feature(aa64_sme2, env_archcpu(env))) {
+ valid_mask |= R_SMCR_EZT0_MASK;
+ }
+ value &= valid_mask;
raw_write(env, ri, value);
/*
@@ -6818,106 +5195,6 @@ static const ARMCPRegInfo nmi_reginfo[] = {
.writefn = aa64_allint_write, .readfn = aa64_allint_read,
.resetfn = arm_cp_reset_ignore },
};
-#endif /* TARGET_AARCH64 */
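
Relating to the smcr_write() hunk above: a minimal sketch of the widened valid mask, with the R_SMCR_* field masks replaced by illustrative stand-ins:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the R_SMCR_* field masks. */
    #define SMCR_LEN_MASK   0x0fULL
    #define SMCR_FA64_MASK  (1ULL << 31)
    #define SMCR_EZT0_MASK  (1ULL << 30)

    static uint64_t smcr_masked(uint64_t value, bool have_sme2)
    {
        uint64_t valid = SMCR_LEN_MASK | SMCR_FA64_MASK;

        if (have_sme2) {
            valid |= SMCR_EZT0_MASK;   /* EZT0 writable only with SME2 */
        }
        return value & valid;
    }

    int main(void)
    {
        printf("0x%llx\n", (unsigned long long)smcr_masked(~0ULL, false));
        printf("0x%llx\n", (unsigned long long)smcr_masked(~0ULL, true));
        return 0;
    }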
-
-static void define_pmu_regs(ARMCPU *cpu)
-{
- /*
- * v7 performance monitor control register: same implementor
- * field as main ID register, and we implement four counters in
- * addition to the cycle count register.
- */
- unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
- ARMCPRegInfo pmcr = {
- .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
- .access = PL0_RW,
- .fgt = FGT_PMCR_EL0,
- .type = ARM_CP_IO | ARM_CP_ALIAS,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
- .accessfn = pmreg_access,
- .readfn = pmcr_read, .raw_readfn = raw_read,
- .writefn = pmcr_write, .raw_writefn = raw_write,
- };
- ARMCPRegInfo pmcr64 = {
- .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCR_EL0,
- .type = ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
- .resetvalue = cpu->isar.reset_pmcr_el0,
- .readfn = pmcr_read, .raw_readfn = raw_read,
- .writefn = pmcr_write, .raw_writefn = raw_write,
- };
-
- define_one_arm_cp_reg(cpu, &pmcr);
- define_one_arm_cp_reg(cpu, &pmcr64);
- for (i = 0; i < pmcrn; i++) {
- char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
- char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
- char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
- char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
- ARMCPRegInfo pmev_regs[] = {
- { .name = pmevcntr_name, .cp = 15, .crn = 14,
- .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
- .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
- .fgt = FGT_PMEVCNTRN_EL0,
- .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
- .accessfn = pmreg_access_xevcntr },
- { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
- .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
- .type = ARM_CP_IO,
- .fgt = FGT_PMEVCNTRN_EL0,
- .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
- .raw_readfn = pmevcntr_rawread,
- .raw_writefn = pmevcntr_rawwrite },
- { .name = pmevtyper_name, .cp = 15, .crn = 14,
- .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
- .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
- .fgt = FGT_PMEVTYPERN_EL0,
- .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
- .accessfn = pmreg_access },
- { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
- .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMEVTYPERN_EL0,
- .type = ARM_CP_IO,
- .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
- .raw_writefn = pmevtyper_rawwrite },
- };
- define_arm_cp_regs(cpu, pmev_regs);
- g_free(pmevcntr_name);
- g_free(pmevcntr_el0_name);
- g_free(pmevtyper_name);
- g_free(pmevtyper_el0_name);
- }
- if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
- ARMCPRegInfo v81_pmu_regs[] = {
- { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = extract64(cpu->pmceid0, 32, 32) },
- { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = extract64(cpu->pmceid1, 32, 32) },
- };
- define_arm_cp_regs(cpu, v81_pmu_regs);
- }
- if (cpu_isar_feature(any_pmuv3p4, cpu)) {
- static const ARMCPRegInfo v84_pmmir = {
- .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
- .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMMIR_EL1,
- .resetvalue = 0
- };
- define_one_arm_cp_reg(cpu, &v84_pmmir);
- }
-}
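
A standalone mirror of the per-counter encoding arithmetic in the loop above (these registers are re-registered via define_pm_cpregs(), called later in this patch):

    #include <stdio.h>

    int main(void)
    {
        /* 31 is the architectural maximum; the loop above runs only to
         * the CPU's actual counter count (pmu_num_counters()). */
        for (int n = 0; n < 31; n++) {
            int crm_cntr = 8 | (3 & (n >> 3));    /* PMEVCNTR<n>: crm 8..11 */
            int crm_type = 12 | (3 & (n >> 3));   /* PMEVTYPER<n>: crm 12..15 */
            int opc2 = n & 7;

            printf("n=%2d cntr crm=%2d type crm=%2d opc2=%d\n",
                   n, crm_cntr, crm_type, opc2);
        }
        return 0;
    }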
#ifndef CONFIG_USER_ONLY
/*
@@ -6929,7 +5206,7 @@ static void define_pmu_regs(ARMCPU *cpu)
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = env_archcpu(env);
- uint64_t pfr1 = cpu->isar.id_pfr1;
+ uint64_t pfr1 = GET_IDREG(&cpu->isar, ID_PFR1);
if (env->gicv3state) {
pfr1 |= 1 << 28;
@@ -6940,7 +5217,7 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = env_archcpu(env);
- uint64_t pfr0 = cpu->isar.id_aa64pfr0;
+ uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0);
if (env->gicv3state) {
pfr0 |= 1 << 24;
@@ -7010,7 +5287,6 @@ static const ARMCPRegInfo lor_reginfo[] = {
.type = ARM_CP_CONST, .resetvalue = 0 },
};
-#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -7503,8 +5779,6 @@ static const ARMCPRegInfo nv2_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) },
};
-#endif /* TARGET_AARCH64 */
-
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -7700,32 +5974,6 @@ static const ARMCPRegInfo vhe_reginfo[] = {
#endif
};
-#ifndef CONFIG_USER_ONLY
-static const ARMCPRegInfo ats1e1_reginfo[] = {
- { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E1RP,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E1WP,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
-};
-
-static const ARMCPRegInfo ats1cp_reginfo[] = {
- { .name = "ATS1CPRP",
- .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .writefn = ats_write },
- { .name = "ATS1CPWP",
- .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .writefn = ats_write },
-};
-#endif
-
/*
* ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
* ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
@@ -7750,6 +5998,8 @@ void register_cp_regs_for_features(ARMCPU *cpu)
{
/* Register all the coprocessor registers based on feature bits */
CPUARMState *env = &cpu->env;
+ ARMISARegisters *isar = &cpu->isar;
+
if (arm_feature(env, ARM_FEATURE_M)) {
/* M profile has no coprocessor registers */
return;
@@ -7764,7 +6014,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, not_v8_cp_reginfo);
}
- define_tlb_insn_regs(cpu);
+#ifndef CONFIG_USER_ONLY
+ if (tcg_enabled()) {
+ define_tlb_insn_regs(cpu);
+ define_at_insn_regs(cpu);
+ }
+#endif
if (arm_feature(env, ARM_FEATURE_V6)) {
/* The ID registers all have impdef reset values */
@@ -7773,7 +6028,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_pfr0 },
+ .resetvalue = GET_IDREG(isar, ID_PFR0)},
/*
* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
* the value of the GIC field until after we define these regs.
@@ -7784,7 +6039,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.accessfn = access_aa32_tid3,
#ifdef CONFIG_USER_ONLY
.type = ARM_CP_CONST,
- .resetvalue = cpu->isar.id_pfr1,
+ .resetvalue = GET_IDREG(isar, ID_PFR1),
#else
.type = ARM_CP_NO_RAW,
.accessfn = access_aa32_tid3,
@@ -7796,72 +6051,72 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_dfr0 },
+ .resetvalue = GET_IDREG(isar, ID_DFR0)},
{ .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->id_afr0 },
+ .resetvalue = GET_IDREG(isar, ID_AFR0)},
{ .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr0 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR0)},
{ .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr1 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR1)},
{ .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr2 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR2)},
{ .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr3 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR3)},
{ .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar0 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR0)},
{ .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar1 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR1)},
{ .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar2 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR2)},
{ .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar3 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR3) },
{ .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar4 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR4) },
{ .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar5 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR5) },
{ .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr4 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR4)},
{ .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar6 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR6) },
};
define_arm_cp_regs(cpu, v6_idregs);
define_arm_cp_regs(cpu, v6_cp_reginfo);
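
A hedged sketch of what a GET_IDREG-style accessor, as used throughout the conversions above, could look like -- one array of ID registers indexed via token pasting. The enum names and struct layout here are assumptions for illustration, not the real definitions:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative enum and container; the real layout is an assumption. */
    typedef enum {
        ID_PFR0_EL1_IDX,
        ID_PFR1_EL1_IDX,
        ID_DFR0_EL1_IDX,
        NUM_IDREGS
    } IdRegIdx;

    typedef struct {
        uint64_t idregs[NUM_IDREGS];
    } IsarRegs;

    #define GET_IDREG(isar, name) ((isar)->idregs[name ## _EL1_IDX])

    int main(void)
    {
        IsarRegs regs = { .idregs = { [ID_PFR0_EL1_IDX] = 0x131 } };

        printf("ID_PFR0 = 0x%" PRIx64 "\n", GET_IDREG(&regs, ID_PFR0));
        return 0;
    }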
@@ -7871,9 +6126,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_V6K)) {
define_arm_cp_regs(cpu, v6k_cp_reginfo);
}
- if (arm_feature(env, ARM_FEATURE_V7VE)) {
- define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
- }
if (arm_feature(env, ARM_FEATURE_V7)) {
ARMCPRegInfo clidr = {
.name = "CLIDR", .state = ARM_CP_STATE_BOTH,
@@ -7881,12 +6133,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_tid4,
.fgt = FGT_CLIDR_EL1,
- .resetvalue = cpu->clidr
+ .resetvalue = GET_IDREG(isar, CLIDR)
};
define_one_arm_cp_reg(cpu, &clidr);
define_arm_cp_regs(cpu, v7_cp_reginfo);
define_debug_regs(cpu);
- define_pmu_regs(cpu);
} else {
define_arm_cp_regs(cpu, not_v7_cp_reginfo);
}
@@ -7912,7 +6163,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.access = PL1_R,
#ifdef CONFIG_USER_ONLY
.type = ARM_CP_CONST,
- .resetvalue = cpu->isar.id_aa64pfr0
+ .resetvalue = GET_IDREG(isar, ID_AA64PFR0)
#else
.type = ARM_CP_NO_RAW,
.accessfn = access_aa64_tid3,
@@ -7924,7 +6175,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64pfr1},
+ .resetvalue = GET_IDREG(isar, ID_AA64PFR1)},
{ .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -7939,12 +6190,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64zfr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64ZFR0)},
{ .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64smfr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64SMFR0)},
{ .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -7959,12 +6210,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64dfr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64DFR0) },
{ .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64dfr1 },
+ .resetvalue = GET_IDREG(isar, ID_AA64DFR1) },
{ .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -7979,12 +6230,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->id_aa64afr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64AFR0) },
{ .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->id_aa64afr1 },
+ .resetvalue = GET_IDREG(isar, ID_AA64AFR1) },
{ .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -7999,17 +6250,17 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64isar0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64ISAR0)},
{ .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64isar1 },
+ .resetvalue = GET_IDREG(isar, ID_AA64ISAR1)},
{ .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64isar2 },
+ .resetvalue = GET_IDREG(isar, ID_AA64ISAR2)},
{ .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -8039,22 +6290,22 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64mmfr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64MMFR0)},
{ .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64mmfr1 },
+ .resetvalue = GET_IDREG(isar, ID_AA64MMFR1) },
{ .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64mmfr2 },
+ .resetvalue = GET_IDREG(isar, ID_AA64MMFR2) },
{ .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64mmfr3 },
+ .resetvalue = GET_IDREG(isar, ID_AA64MMFR3) },
{ .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -8126,42 +6377,22 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_pfr2 },
+ .resetvalue = GET_IDREG(isar, ID_PFR2)},
{ .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_dfr1 },
+ .resetvalue = GET_IDREG(isar, ID_DFR1)},
{ .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_mmfr5 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR5)},
{ .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
.resetvalue = 0 },
- { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = extract64(cpu->pmceid0, 0, 32) },
- { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = cpu->pmceid0 },
- { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = extract64(cpu->pmceid1, 0, 32) },
- { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = cpu->pmceid1 },
};
#ifdef CONFIG_USER_ONLY
static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
@@ -8496,12 +6727,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
offsetoflow32(CPUARMState, cp15.par_ns) },
.writefn = par_write},
-#ifndef CONFIG_USER_ONLY
- /* This underdecoding is safe because the reginfo is NO_RAW. */
- { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
- .access = PL1_W, .accessfn = ats_access,
- .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
-#endif
};
/*
@@ -8907,14 +7132,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_pan, cpu)) {
define_one_arm_cp_reg(cpu, &pan_reginfo);
}
-#ifndef CONFIG_USER_ONLY
- if (cpu_isar_feature(aa64_ats1e1, cpu)) {
- define_arm_cp_regs(cpu, ats1e1_reginfo);
- }
- if (cpu_isar_feature(aa32_ats1e1, cpu)) {
- define_arm_cp_regs(cpu, ats1cp_reginfo);
- }
-#endif
if (cpu_isar_feature(aa64_uao, cpu)) {
define_one_arm_cp_reg(cpu, &uao_reginfo);
}
@@ -8945,7 +7162,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
}
-#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_sme, cpu)) {
define_arm_cp_regs(cpu, sme_reginfo);
}
@@ -9006,7 +7222,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_nmi, cpu)) {
define_arm_cp_regs(cpu, nmi_reginfo);
}
-#endif
if (cpu_isar_feature(any_predinv, cpu)) {
define_arm_cp_regs(cpu, predinv_reginfo);
@@ -9016,6 +7231,8 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, ccsidr2_reginfo);
}
+ define_pm_cpregs(cpu);
+
#ifndef CONFIG_USER_ONLY
/*
* Register redirections and aliases must be done last,
@@ -10615,7 +8832,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
unsigned int new_el = env->exception.target_el;
- target_ulong addr = env->cp15.vbar_el[new_el];
+ vaddr addr = env->cp15.vbar_el[new_el];
unsigned int new_mode = aarch64_pstate_mode(new_el, true);
unsigned int old_mode;
unsigned int cur_el = arm_current_el(env);
@@ -11417,116 +9634,6 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env)
return arm_mmu_idx_el(env, arm_current_el(env));
}
-static bool mve_no_pred(CPUARMState *env)
-{
- /*
- * Return true if there is definitely no predication of MVE
- * instructions by VPR or LTPSIZE. (Returning false even if there
- * isn't any predication is OK; generated code will just be
- * a little worse.)
- * If the CPU does not implement MVE then this TB flag is always 0.
- *
- * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
- * logic in gen_update_fp_context() needs to be updated to match.
- *
- * We do not include the effect of the ECI bits here -- they are
- * tracked in other TB flags. This simplifies the logic for
- * "when did we emit code that changes the MVE_NO_PRED TB flag
- * and thus need to end the TB?".
- */
-    if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
- return false;
- }
- if (env->v7m.vpr) {
- return false;
- }
- if (env->v7m.ltpsize < 4) {
- return false;
- }
- return true;
-}
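
A truth-table mirror of the condition above, with illustrative parameter names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of mve_no_pred(): the flag is only raised when MVE exists,
     * VPR is zero and LTPSIZE is 4; without MVE it simply stays 0. */
    static bool mve_no_pred_sketch(bool have_mve, uint32_t vpr, uint32_t ltpsize)
    {
        return have_mve && vpr == 0 && ltpsize >= 4;
    }

    int main(void)
    {
        printf("%d\n", mve_no_pred_sketch(true, 0, 4));   /* 1 */
        printf("%d\n", mve_no_pred_sketch(true, 3, 4));   /* 0 */
        printf("%d\n", mve_no_pred_sketch(false, 0, 4));  /* 0 */
        return 0;
    }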
-
-void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
-{
- CPUARMTBFlags flags;
-
- assert_hflags_rebuild_correctly(env);
- flags = env->hflags;
-
- if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
- *pc = env->pc;
- if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
- DP_TBFLAG_A64(flags, BTYPE, env->btype);
- }
- } else {
- *pc = env->regs[15];
-
- if (arm_feature(env, ARM_FEATURE_M)) {
- if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
- FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
- != env->v7m.secure) {
- DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
- }
-
- if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
- (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
- (env->v7m.secure &&
- !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
- /*
- * ASPEN is set, but FPCA/SFPA indicate that there is no
- * active FP context; we must create a new FP context before
- * executing any FP insn.
- */
- DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
- }
-
- bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
- if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
- DP_TBFLAG_M32(flags, LSPACT, 1);
- }
-
- if (mve_no_pred(env)) {
- DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
- }
- } else {
- /*
- * Note that XSCALE_CPAR shares bits with VECSTRIDE.
- * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
- */
- if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
- } else {
- DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
- DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
- }
- if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
- DP_TBFLAG_A32(flags, VFPEN, 1);
- }
- }
-
- DP_TBFLAG_AM32(flags, THUMB, env->thumb);
- DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
- }
-
- /*
- * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
- * states defined in the ARM ARM for software singlestep:
- * SS_ACTIVE PSTATE.SS State
- * 0 x Inactive (the TB flag for SS is always 0)
- * 1 0 Active-pending
- * 1 1 Active-not-pending
- * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
- */
- if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
- DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
- }
-
- *pflags = flags.flags;
- *cs_base = flags.flags2;
-}
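
A minimal encoding of the three single-step states from the table above, with invented names:

    #include <stdio.h>

    typedef enum {
        SS_INACTIVE,           /* SS_ACTIVE == 0 */
        SS_ACTIVE_PENDING,     /* SS_ACTIVE == 1, PSTATE.SS == 0 */
        SS_ACTIVE_NOT_PENDING  /* SS_ACTIVE == 1, PSTATE.SS == 1 */
    } SSState;

    static SSState ss_state(int ss_active, int pstate_ss)
    {
        if (!ss_active) {
            return SS_INACTIVE;
        }
        return pstate_ss ? SS_ACTIVE_NOT_PENDING : SS_ACTIVE_PENDING;
    }

    int main(void)
    {
        printf("%d %d %d\n", ss_state(0, 1), ss_state(1, 0), ss_state(1, 1));
        return 0;
    }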
-
-#ifdef TARGET_AARCH64
/*
* The manual says that when SVE is enabled and VQ is widened the
* implementation is allowed to zero the previously inaccessible
@@ -11641,7 +9748,6 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
aarch64_sve_narrow_vq(env, new_len + 1);
}
}
-#endif
#ifndef CONFIG_USER_ONLY
ARMSecuritySpace arm_security_space(CPUARMState *env)
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 0907505..f340a49 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -1,1154 +1,6 @@
-DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
-DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+/* SPDX-License-Identifier: GPL-2.0-or-later */
-DEF_HELPER_3(add_setq, i32, env, i32, i32)
-DEF_HELPER_3(add_saturate, i32, env, i32, i32)
-DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
-DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
-DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
-DEF_HELPER_FLAGS_3(sdiv, TCG_CALL_NO_RWG, s32, env, s32, s32)
-DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_RWG, i32, env, i32, i32)
-DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
-
-#define PAS_OP(pfx) \
- DEF_HELPER_3(pfx ## add8, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## sub8, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## sub16, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## add16, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## addsubx, i32, i32, i32, ptr) \
- DEF_HELPER_3(pfx ## subaddx, i32, i32, i32, ptr)
-
-PAS_OP(s)
-PAS_OP(u)
-#undef PAS_OP
-
-#define PAS_OP(pfx) \
- DEF_HELPER_2(pfx ## add8, i32, i32, i32) \
- DEF_HELPER_2(pfx ## sub8, i32, i32, i32) \
- DEF_HELPER_2(pfx ## sub16, i32, i32, i32) \
- DEF_HELPER_2(pfx ## add16, i32, i32, i32) \
- DEF_HELPER_2(pfx ## addsubx, i32, i32, i32) \
- DEF_HELPER_2(pfx ## subaddx, i32, i32, i32)
-PAS_OP(q)
-PAS_OP(sh)
-PAS_OP(uq)
-PAS_OP(uh)
-#undef PAS_OP
-
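
A note for readers skimming these removals: each DEF_HELPER_n(name, ret, args...) line declares a TCG helper. A rough, hedged sketch of the prototype the first PAS_OP expansion above implies (the real macro machinery lives in QEMU's helper headers, and the exact argument types are an assumption here):

    #include <stdint.h>

    /* DEF_HELPER_3(sadd8, i32, i32, i32, ptr) expands, roughly, to: */
    uint32_t helper_sadd8(uint32_t a, uint32_t b, void *ge);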
-DEF_HELPER_3(ssat, i32, env, i32, i32)
-DEF_HELPER_3(usat, i32, env, i32, i32)
-DEF_HELPER_3(ssat16, i32, env, i32, i32)
-DEF_HELPER_3(usat16, i32, env, i32, i32)
-
-DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32)
-
-DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
- i32, i32, i32, i32)
-DEF_HELPER_2(exception_internal, noreturn, env, i32)
-DEF_HELPER_3(exception_with_syndrome, noreturn, env, i32, i32)
-DEF_HELPER_4(exception_with_syndrome_el, noreturn, env, i32, i32, i32)
-DEF_HELPER_2(exception_bkpt_insn, noreturn, env, i32)
-DEF_HELPER_2(exception_swstep, noreturn, env, i32)
-DEF_HELPER_2(exception_pc_alignment, noreturn, env, tl)
-DEF_HELPER_1(setend, void, env)
-DEF_HELPER_2(wfi, void, env, i32)
-DEF_HELPER_1(wfe, void, env)
-DEF_HELPER_2(wfit, void, env, i64)
-DEF_HELPER_1(yield, void, env)
-DEF_HELPER_1(pre_hvc, void, env)
-DEF_HELPER_2(pre_smc, void, env, i32)
-DEF_HELPER_1(vesb, void, env)
-
-DEF_HELPER_3(cpsr_write, void, env, i32, i32)
-DEF_HELPER_2(cpsr_write_eret, void, env, i32)
-DEF_HELPER_1(cpsr_read, i32, env)
-
-DEF_HELPER_3(v7m_msr, void, env, i32, i32)
-DEF_HELPER_2(v7m_mrs, i32, env, i32)
-
-DEF_HELPER_2(v7m_bxns, void, env, i32)
-DEF_HELPER_2(v7m_blxns, void, env, i32)
-
-DEF_HELPER_3(v7m_tt, i32, env, i32, i32)
-
-DEF_HELPER_1(v7m_preserve_fp_state, void, env)
-
-DEF_HELPER_2(v7m_vlstm, void, env, i32)
-DEF_HELPER_2(v7m_vlldm, void, env, i32)
-
-DEF_HELPER_2(v8m_stackcheck, void, env, i32)
-
-DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32)
-
-DEF_HELPER_4(access_check_cp_reg, cptr, env, i32, i32, i32)
-DEF_HELPER_FLAGS_2(lookup_cp_reg, TCG_CALL_NO_RWG_SE, cptr, env, i32)
-DEF_HELPER_FLAGS_2(tidcp_el0, TCG_CALL_NO_WG, void, env, i32)
-DEF_HELPER_FLAGS_2(tidcp_el1, TCG_CALL_NO_WG, void, env, i32)
-DEF_HELPER_3(set_cp_reg, void, env, cptr, i32)
-DEF_HELPER_2(get_cp_reg, i32, env, cptr)
-DEF_HELPER_3(set_cp_reg64, void, env, cptr, i64)
-DEF_HELPER_2(get_cp_reg64, i64, env, cptr)
-
-DEF_HELPER_2(get_r13_banked, i32, env, i32)
-DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
-
-DEF_HELPER_3(mrs_banked, i32, env, i32, i32)
-DEF_HELPER_4(msr_banked, void, env, i32, i32, i32)
-
-DEF_HELPER_2(get_user_reg, i32, env, i32)
-DEF_HELPER_3(set_user_reg, void, env, i32, i32)
-
-DEF_HELPER_FLAGS_1(rebuild_hflags_m32_newel, TCG_CALL_NO_RWG, void, env)
-DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int)
-DEF_HELPER_FLAGS_1(rebuild_hflags_a32_newel, TCG_CALL_NO_RWG, void, env)
-DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int)
-DEF_HELPER_FLAGS_2(rebuild_hflags_a64, TCG_CALL_NO_RWG, void, env, int)
-
-DEF_HELPER_FLAGS_5(probe_access, TCG_CALL_NO_WG, void, env, tl, i32, i32, i32)
-
-DEF_HELPER_1(vfp_get_fpscr, i32, env)
-DEF_HELPER_2(vfp_set_fpscr, void, env, i32)
-
-DEF_HELPER_3(vfp_addh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_adds, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_addd, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_subh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_subs, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_subd, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_mulh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_muls, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_muld, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_divh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_divs, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_divd, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_maxh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_maxs, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_maxd, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_minh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_mins, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_mind, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_maxnumh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_maxnums, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, fpst)
-DEF_HELPER_3(vfp_minnumh, f16, f16, f16, fpst)
-DEF_HELPER_3(vfp_minnums, f32, f32, f32, fpst)
-DEF_HELPER_3(vfp_minnumd, f64, f64, f64, fpst)
-DEF_HELPER_2(vfp_sqrth, f16, f16, fpst)
-DEF_HELPER_2(vfp_sqrts, f32, f32, fpst)
-DEF_HELPER_2(vfp_sqrtd, f64, f64, fpst)
-DEF_HELPER_3(vfp_cmph, void, f16, f16, env)
-DEF_HELPER_3(vfp_cmps, void, f32, f32, env)
-DEF_HELPER_3(vfp_cmpd, void, f64, f64, env)
-DEF_HELPER_3(vfp_cmpeh, void, f16, f16, env)
-DEF_HELPER_3(vfp_cmpes, void, f32, f32, env)
-DEF_HELPER_3(vfp_cmped, void, f64, f64, env)
-
-DEF_HELPER_2(vfp_fcvtds, f64, f32, fpst)
-DEF_HELPER_2(vfp_fcvtsd, f32, f64, fpst)
-DEF_HELPER_FLAGS_2(bfcvt, TCG_CALL_NO_RWG, i32, f32, fpst)
-DEF_HELPER_FLAGS_2(bfcvt_pair, TCG_CALL_NO_RWG, i32, i64, fpst)
-
-DEF_HELPER_2(vfp_uitoh, f16, i32, fpst)
-DEF_HELPER_2(vfp_uitos, f32, i32, fpst)
-DEF_HELPER_2(vfp_uitod, f64, i32, fpst)
-DEF_HELPER_2(vfp_sitoh, f16, i32, fpst)
-DEF_HELPER_2(vfp_sitos, f32, i32, fpst)
-DEF_HELPER_2(vfp_sitod, f64, i32, fpst)
-
-DEF_HELPER_2(vfp_touih, i32, f16, fpst)
-DEF_HELPER_2(vfp_touis, i32, f32, fpst)
-DEF_HELPER_2(vfp_touid, i32, f64, fpst)
-DEF_HELPER_2(vfp_touizh, i32, f16, fpst)
-DEF_HELPER_2(vfp_touizs, i32, f32, fpst)
-DEF_HELPER_2(vfp_touizd, i32, f64, fpst)
-DEF_HELPER_2(vfp_tosih, s32, f16, fpst)
-DEF_HELPER_2(vfp_tosis, s32, f32, fpst)
-DEF_HELPER_2(vfp_tosid, s32, f64, fpst)
-DEF_HELPER_2(vfp_tosizh, s32, f16, fpst)
-DEF_HELPER_2(vfp_tosizs, s32, f32, fpst)
-DEF_HELPER_2(vfp_tosizd, s32, f64, fpst)
-
-DEF_HELPER_3(vfp_toshh_round_to_zero, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_toslh_round_to_zero, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_touhh_round_to_zero, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_toulh_round_to_zero, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_tosqd_round_to_zero, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_touqd_round_to_zero, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_touhh, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_toshh, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_toulh, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_toslh, i32, f16, i32, fpst)
-DEF_HELPER_3(vfp_touqh, i64, f16, i32, fpst)
-DEF_HELPER_3(vfp_tosqh, i64, f16, i32, fpst)
-DEF_HELPER_3(vfp_toshs, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_tosls, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_tosqs, i64, f32, i32, fpst)
-DEF_HELPER_3(vfp_touhs, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_touls, i32, f32, i32, fpst)
-DEF_HELPER_3(vfp_touqs, i64, f32, i32, fpst)
-DEF_HELPER_3(vfp_toshd, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_tosld, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_tosqd, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_touhd, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_tould, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_touqd, i64, f64, i32, fpst)
-DEF_HELPER_3(vfp_shtos, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_sltos, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_sqtos, f32, i64, i32, fpst)
-DEF_HELPER_3(vfp_uhtos, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_ultos, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_uqtos, f32, i64, i32, fpst)
-DEF_HELPER_3(vfp_shtod, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_sltod, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_sqtod, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_uhtod, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_ultod, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_uqtod, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_shtoh, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_uhtoh, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_sltoh, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_ultoh, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_sqtoh, f16, i64, i32, fpst)
-DEF_HELPER_3(vfp_uqtoh, f16, i64, i32, fpst)
-
-DEF_HELPER_3(vfp_shtos_round_to_nearest, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_sltos_round_to_nearest, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_uhtos_round_to_nearest, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_ultos_round_to_nearest, f32, i32, i32, fpst)
-DEF_HELPER_3(vfp_shtod_round_to_nearest, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_sltod_round_to_nearest, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_uhtod_round_to_nearest, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_ultod_round_to_nearest, f64, i64, i32, fpst)
-DEF_HELPER_3(vfp_shtoh_round_to_nearest, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_uhtoh_round_to_nearest, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_sltoh_round_to_nearest, f16, i32, i32, fpst)
-DEF_HELPER_3(vfp_ultoh_round_to_nearest, f16, i32, i32, fpst)
-
-DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, fpst)
-
-DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f32, TCG_CALL_NO_RWG, f32, f16, fpst, i32)
-DEF_HELPER_FLAGS_3(vfp_fcvt_f32_to_f16, TCG_CALL_NO_RWG, f16, f32, fpst, i32)
-DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, f16, fpst, i32)
-DEF_HELPER_FLAGS_3(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, f16, f64, fpst, i32)
-
-DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, fpst)
-DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, fpst)
-DEF_HELPER_4(vfp_muladdh, f16, f16, f16, f16, fpst)
-
-DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, fpst)
-DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(recpe_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, fpst)
-DEF_HELPER_FLAGS_2(rsqrte_f16, TCG_CALL_NO_RWG, f16, f16, fpst)
-DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(rsqrte_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, fpst)
-DEF_HELPER_FLAGS_1(recpe_u32, TCG_CALL_NO_RWG, i32, i32)
-DEF_HELPER_FLAGS_1(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32)
-DEF_HELPER_FLAGS_4(neon_tbl, TCG_CALL_NO_RWG, i64, env, i32, i64, i64)
-
-DEF_HELPER_3(shl_cc, i32, env, i32, i32)
-DEF_HELPER_3(shr_cc, i32, env, i32, i32)
-DEF_HELPER_3(sar_cc, i32, env, i32, i32)
-DEF_HELPER_3(ror_cc, i32, env, i32, i32)
-
-DEF_HELPER_FLAGS_2(rinth_exact, TCG_CALL_NO_RWG, f16, f16, fpst)
-DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, fpst)
-DEF_HELPER_FLAGS_2(rinth, TCG_CALL_NO_RWG, f16, f16, fpst)
-DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, fpst)
-
-DEF_HELPER_FLAGS_2(vjcvt, TCG_CALL_NO_RWG, i32, f64, env)
-DEF_HELPER_FLAGS_2(fjcvtzs, TCG_CALL_NO_RWG, i64, f64, fpst)
-
-DEF_HELPER_FLAGS_3(check_hcr_el2_trap, TCG_CALL_NO_WG, void, env, i32, i32)
-
-/* neon_helper.c */
-DEF_HELPER_2(neon_pmin_u8, i32, i32, i32)
-DEF_HELPER_2(neon_pmin_s8, i32, i32, i32)
-DEF_HELPER_2(neon_pmin_u16, i32, i32, i32)
-DEF_HELPER_2(neon_pmin_s16, i32, i32, i32)
-DEF_HELPER_2(neon_pmax_u8, i32, i32, i32)
-DEF_HELPER_2(neon_pmax_s8, i32, i32, i32)
-DEF_HELPER_2(neon_pmax_u16, i32, i32, i32)
-DEF_HELPER_2(neon_pmax_s16, i32, i32, i32)
-
-DEF_HELPER_2(neon_shl_u16, i32, i32, i32)
-DEF_HELPER_2(neon_shl_s16, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_u8, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_s8, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_u16, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_s16, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_u32, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_s32, i32, i32, i32)
-DEF_HELPER_2(neon_rshl_u64, i64, i64, i64)
-DEF_HELPER_2(neon_rshl_s64, i64, i64, i64)
-DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64)
-DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64)
-DEF_HELPER_FLAGS_5(neon_sqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_sqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(neon_uqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshli_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshli_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshli_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshli_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_uqshli_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_uqshli_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_uqshli_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_uqshli_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshlui_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshlui_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshlui_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_4(neon_sqshlui_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_4(gvec_srshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_srshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_srshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_srshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_urshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_urshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_urshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_urshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_2(neon_add_u8, i32, i32, i32)
-DEF_HELPER_2(neon_add_u16, i32, i32, i32)
-DEF_HELPER_2(neon_sub_u8, i32, i32, i32)
-DEF_HELPER_2(neon_sub_u16, i32, i32, i32)
-DEF_HELPER_2(neon_mul_u8, i32, i32, i32)
-DEF_HELPER_2(neon_mul_u16, i32, i32, i32)
-
-DEF_HELPER_2(neon_tst_u8, i32, i32, i32)
-DEF_HELPER_2(neon_tst_u16, i32, i32, i32)
-DEF_HELPER_2(neon_tst_u32, i32, i32, i32)
-
-DEF_HELPER_1(neon_clz_u8, i32, i32)
-DEF_HELPER_1(neon_clz_u16, i32, i32)
-DEF_HELPER_1(neon_cls_s8, i32, i32)
-DEF_HELPER_1(neon_cls_s16, i32, i32)
-DEF_HELPER_1(neon_cls_s32, i32, i32)
-DEF_HELPER_FLAGS_3(gvec_cnt_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32)
-DEF_HELPER_4(neon_qrdmlah_s16, i32, env, i32, i32, i32)
-DEF_HELPER_4(neon_qrdmlsh_s16, i32, env, i32, i32, i32)
-DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32)
-DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32)
-DEF_HELPER_4(neon_qrdmlah_s32, i32, env, s32, s32, s32)
-DEF_HELPER_4(neon_qrdmlsh_s32, i32, env, s32, s32, s32)
-
-DEF_HELPER_1(neon_narrow_u8, i64, i64)
-DEF_HELPER_1(neon_narrow_u16, i64, i64)
-DEF_HELPER_2(neon_unarrow_sat8, i64, env, i64)
-DEF_HELPER_2(neon_narrow_sat_u8, i64, env, i64)
-DEF_HELPER_2(neon_narrow_sat_s8, i64, env, i64)
-DEF_HELPER_2(neon_unarrow_sat16, i64, env, i64)
-DEF_HELPER_2(neon_narrow_sat_u16, i64, env, i64)
-DEF_HELPER_2(neon_narrow_sat_s16, i64, env, i64)
-DEF_HELPER_2(neon_unarrow_sat32, i64, env, i64)
-DEF_HELPER_2(neon_narrow_sat_u32, i64, env, i64)
-DEF_HELPER_2(neon_narrow_sat_s32, i64, env, i64)
-DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
-DEF_HELPER_1(neon_narrow_high_u16, i32, i64)
-DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64)
-DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64)
-DEF_HELPER_1(neon_widen_u8, i64, i32)
-DEF_HELPER_1(neon_widen_s8, i64, i32)
-DEF_HELPER_1(neon_widen_u16, i64, i32)
-DEF_HELPER_1(neon_widen_s16, i64, i32)
-
-DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64)
-DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64)
-DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64)
-DEF_HELPER_2(neon_abdl_u16, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_s16, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_u32, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_s32, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_u64, i64, i32, i32)
-DEF_HELPER_2(neon_abdl_s64, i64, i32, i32)
-DEF_HELPER_2(neon_mull_u8, i64, i32, i32)
-DEF_HELPER_2(neon_mull_s8, i64, i32, i32)
-DEF_HELPER_2(neon_mull_u16, i64, i32, i32)
-DEF_HELPER_2(neon_mull_s16, i64, i32, i32)
-
-DEF_HELPER_1(neon_negl_u16, i64, i64)
-DEF_HELPER_1(neon_negl_u32, i64, i64)
-
-DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64)
-DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32)
-DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64)
-
-DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, fpst)
-DEF_HELPER_3(neon_cge_f32, i32, i32, i32, fpst)
-DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, fpst)
-DEF_HELPER_3(neon_acge_f32, i32, i32, i32, fpst)
-DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, fpst)
-DEF_HELPER_3(neon_acge_f64, i64, i64, i64, fpst)
-DEF_HELPER_3(neon_acgt_f64, i64, i64, i64, fpst)
-
-/* iwmmxt_helper.c */
-DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64)
-DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64)
-DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64)
-
-#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \
-DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \
-DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \
-DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \
-
-DEF_IWMMXT_HELPER_SIZE_ENV(unpackl)
-DEF_IWMMXT_HELPER_SIZE_ENV(unpackh)
-
-DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64)
-DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64)
-
-DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq)
-DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu)
-DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts)
-
-DEF_IWMMXT_HELPER_SIZE_ENV(mins)
-DEF_IWMMXT_HELPER_SIZE_ENV(minu)
-DEF_IWMMXT_HELPER_SIZE_ENV(maxs)
-DEF_IWMMXT_HELPER_SIZE_ENV(maxu)
-
-DEF_IWMMXT_HELPER_SIZE_ENV(subn)
-DEF_IWMMXT_HELPER_SIZE_ENV(addn)
-DEF_IWMMXT_HELPER_SIZE_ENV(subu)
-DEF_IWMMXT_HELPER_SIZE_ENV(addu)
-DEF_IWMMXT_HELPER_SIZE_ENV(subs)
-DEF_IWMMXT_HELPER_SIZE_ENV(adds)
-
-DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64)
-
-DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32)
-DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32)
-
-DEF_HELPER_1(iwmmxt_bcstb, i64, i32)
-DEF_HELPER_1(iwmmxt_bcstw, i64, i32)
-DEF_HELPER_1(iwmmxt_bcstl, i64, i32)
-
-DEF_HELPER_1(iwmmxt_addcb, i64, i64)
-DEF_HELPER_1(iwmmxt_addcw, i64, i64)
-DEF_HELPER_1(iwmmxt_addcl, i64, i64)
-
-DEF_HELPER_1(iwmmxt_msbb, i32, i64)
-DEF_HELPER_1(iwmmxt_msbw, i32, i64)
-DEF_HELPER_1(iwmmxt_msbl, i32, i64)
-
-DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32)
-DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32)
-
-DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64)
-DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64)
-
-DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32)
-DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32)
-DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32)
-
-DEF_HELPER_FLAGS_2(neon_unzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_unzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qunzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qunzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qunzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_zip8, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_zip16, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
-DEF_HELPER_FLAGS_2(neon_qzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
-
-DEF_HELPER_FLAGS_4(crypto_aese, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_aesd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_aesmc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_aesimc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sha1su0, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha1c, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha1p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha1m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_sha1h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_sha1su1, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sha256h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sha512su1, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sm3tt1a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3tt1b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3tt2a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3tt2b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3partw1, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm3partw2, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_sm4e, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(crypto_sm4ekey, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(crypto_rax1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
-DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
-
-DEF_HELPER_FLAGS_5(gvec_qrdmlah_s16, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s16, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(sve2_sqrdmlah_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlah_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlah_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_sdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_udot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sdot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_udot_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usdot_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_sdot_idx_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_udot_idx_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sdot_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_udot_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sudot_idx_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usdot_idx_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_6(gvec_fcmlah, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fcmlah_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fcmlas, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fcmlad, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_sstoh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_sitos, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_ustoh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_uitos, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_tosszh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_tosizs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_touszh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_touizs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vcvt_sf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_uf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rz_fs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rz_fu, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_uh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rz_hs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rz_hu, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vcvt_sd, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_ud, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rz_ds, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rz_du, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sd, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ud, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ss, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_us, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vcvt_rm_uh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vrint_rm_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vrint_rm_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_vrintx_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_vrintx_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_frecpe_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_frsqrte_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_frsqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_frsqrte_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_frsqrte_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fcgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fcgt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fcgt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fcge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fcge0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fcge0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fceq0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fceq0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fcle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fcle0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fcle0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_fclt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fclt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_4(gvec_fclt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_ah_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_ah_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_ah_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fceq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fceq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fceq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fcge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fcge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fcge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fcgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fcgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fcgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_facge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_facge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_facge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_facgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_facgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_facgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmaxnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fminnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_recps_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_recps_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmla_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmla_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmls_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmls_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_vfma_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_vfma_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_vfma_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_ah_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_ah_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_ah_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_ftsmul_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmul_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmul_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmul_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_6(gvec_fmla_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fmla_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_6(gvec_fmls_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fmls_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_fmls_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_uqadd_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqadd_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqsub_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqsub_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqsub_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uqsub_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqsub_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqsub_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usqadd_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usqadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usqadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usqadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_suqadd_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_suqadd_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_suqadd_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_suqadd_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmlal_a32, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(gvec_fmlal_a64, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a32, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a64, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_2(frint32_s, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, fpst)
-DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, fpst)
-DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, fpst)
-
-DEF_HELPER_FLAGS_3(gvec_ceq0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_clt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_clt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cle0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cgt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cge0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_cge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_smulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_umulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_ssra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ssra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ssra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ssra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_usra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_usra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_usra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_usra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_srsra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srsra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srsra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_srsra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_ursra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ursra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ursra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ursra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_sri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_sli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sli_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sli_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_sli_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_sabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_uabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_saba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_saba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_saba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_saba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_uaba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uaba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uaba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uaba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_mul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_mul_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_mul_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_mla_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_mla_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_mla_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_mls_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_mls_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_mls_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqdmulh_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqdmulh_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_6(sve2_fmlal_zzzw_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_6(sve2_fmlal_zzxw_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_smmla_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_ummla_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_usmmla_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_6(gvec_bfdot, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, env, i32)
-DEF_HELPER_FLAGS_6(gvec_bfdot_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_6(gvec_bfmmla, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, env, i32)
-
-DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG,
- void, ptr, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_5(gvec_faddp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_faddp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_faddp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fminp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fmaxnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fmaxnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_5(gvec_fminnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fminnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-DEF_HELPER_FLAGS_5(gvec_fminnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
-
-DEF_HELPER_FLAGS_4(gvec_addp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_addp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_addp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_addp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_smaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_smaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_sminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_sminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_umaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_umaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_4(gvec_uminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_4(gvec_uminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
-
-DEF_HELPER_FLAGS_3(gvec_urecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
-DEF_HELPER_FLAGS_3(gvec_ursqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+#include "tcg/helper.h"
#ifdef TARGET_AARCH64
#include "tcg/helper-a64.h"
diff --git a/target/arm/hvf-stub.c b/target/arm/hvf-stub.c
new file mode 100644
index 0000000..ff13726
--- /dev/null
+++ b/target/arm/hvf-stub.c
@@ -0,0 +1,20 @@
+/*
+ * QEMU Hypervisor.framework (HVF) stubs for ARM
+ *
+ * Copyright (c) Linaro
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hvf_arm.h"
+
+uint32_t hvf_arm_get_default_ipa_bit_size(void)
+{
+ g_assert_not_reached();
+}
+
+uint32_t hvf_arm_get_max_ipa_bit_size(void)
+{
+ g_assert_not_reached();
+}
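
This stub file exists so that shared code can link in builds without Hypervisor.framework; it pairs with the hvf_arm.h change further down, which drops the static inline fallbacks that silently returned 0. Reaching the stub now aborts, turning "HVF code called without HVF support" from a quiet misconfiguration into a loud bug. A generic, hypothetical illustration of the link-time-stub pattern (the names below are made up, not QEMU's; QEMU selects the stub via its build system when CONFIG_HVF is absent):

    #include <assert.h>
    #include <stdbool.h>

    /* feature.h -- declared unconditionally, no #ifdef fallback. */
    unsigned feature_max_limit(void);

    /* feature-stub.c -- linked in builds without the feature. */
    unsigned feature_max_limit(void)
    {
        /* Callers must gate on the feature being enabled first. */
        assert(!"feature_max_limit() reached without feature support");
        return 0;   /* unreachable; keeps the compiler happy */
    }

    int main(void)
    {
        bool feature_enabled = false;   /* models a predicate like hvf_enabled() */
        return feature_enabled ? (int)feature_max_limit() : 0;
    }
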
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index 2439af6..c9cfcdc 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -19,10 +19,12 @@
#include "system/hw_accel.h"
#include "hvf_arm.h"
#include "cpregs.h"
+#include "cpu-sysregs.h"
#include <mach/mach_time.h>
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
+#include "system/memory.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
@@ -811,9 +813,9 @@ int hvf_put_registers(CPUState *cpu)
static void flush_cpu_state(CPUState *cpu)
{
- if (cpu->accel->dirty) {
+ if (cpu->vcpu_dirty) {
hvf_put_registers(cpu);
- cpu->accel->dirty = false;
+ cpu->vcpu_dirty = false;
}
}
@@ -844,14 +846,17 @@ static uint64_t hvf_get_reg(CPUState *cpu, int rt)
return val;
}
-static void clamp_id_aa64mmfr0_parange_to_ipa_size(uint64_t *id_aa64mmfr0)
+static void clamp_id_aa64mmfr0_parange_to_ipa_size(ARMISARegisters *isar)
{
uint32_t ipa_size = chosen_ipa_bit_size ?
chosen_ipa_bit_size : hvf_arm_get_max_ipa_bit_size();
+ uint64_t id_aa64mmfr0;
/* Clamp down the PARange to the IPA size the kernel supports. */
uint8_t index = round_down_to_parange_index(ipa_size);
- *id_aa64mmfr0 = (*id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
+ id_aa64mmfr0 = GET_IDREG(isar, ID_AA64MMFR0);
+ id_aa64mmfr0 = (id_aa64mmfr0 & ~R_ID_AA64MMFR0_PARANGE_MASK) | index;
+ SET_IDREG(isar, ID_AA64MMFR0, id_aa64mmfr0);
}
static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
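
clamp_id_aa64mmfr0_parange_to_ipa_size() now takes the whole ARMISARegisters and goes through the GET_IDREG/SET_IDREG accessors instead of a raw pointer into one field; the operation itself is still a read-modify-write of the PARange field (bits [3:0] of ID_AA64MMFR0_EL1). A stand-alone sketch with simplified stand-ins for the accessors (the real macros and the ID_*_EL1_IDX enum come from target/arm/cpu-sysregs.h):

    #include <stdint.h>
    #include <stdio.h>

    enum { ID_AA64MMFR0_EL1_IDX, NUM_IDREGS };
    typedef struct { uint64_t idregs[NUM_IDREGS]; } ISARegs;

    #define GET_IDREG(isar, idx)    ((isar)->idregs[idx])
    #define SET_IDREG(isar, idx, v) ((isar)->idregs[idx] = (v))

    #define PARANGE_MASK 0xfull     /* ID_AA64MMFR0_EL1.PARange, bits [3:0] */

    static void clamp_parange(ISARegs *isar, uint64_t index)
    {
        uint64_t r = GET_IDREG(isar, ID_AA64MMFR0_EL1_IDX);
        r = (r & ~PARANGE_MASK) | index;   /* rewrite just one field */
        SET_IDREG(isar, ID_AA64MMFR0_EL1_IDX, r);
    }

    int main(void)
    {
        ISARegs isar = { .idregs = { 0x1125 } };  /* PARange == 5 (48 bits) */
        clamp_parange(&isar, 2);                  /* clamp to 40-bit IPA */
        printf("0x%llx\n",                        /* prints 0x1122 */
               (unsigned long long)GET_IDREG(&isar, ID_AA64MMFR0_EL1_IDX));
        return 0;
    }
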
@@ -861,16 +866,16 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
int reg;
uint64_t *val;
} regs[] = {
- { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
- { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
- { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
- { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
- { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
- { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
+ { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.idregs[ID_AA64PFR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.idregs[ID_AA64PFR1_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.idregs[ID_AA64DFR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.idregs[ID_AA64DFR1_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.idregs[ID_AA64ISAR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.idregs[ID_AA64ISAR1_EL1_IDX] },
/* Add ID_AA64ISAR2_EL1 here when HVF supports it */
- { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
- { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
- { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
+ { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.idregs[ID_AA64MMFR0_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.idregs[ID_AA64MMFR1_EL1_IDX] },
+ { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.idregs[ID_AA64MMFR2_EL1_IDX] },
/* Add ID_AA64MMFR3_EL1 here when HVF supports it */
};
hv_vcpu_t fd;
@@ -878,7 +883,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
hv_vcpu_exit_t *exit;
int i;
- ahcf->dtb_compatible = "arm,arm-v8";
+ ahcf->dtb_compatible = "arm,armv8";
ahcf->features = (1ULL << ARM_FEATURE_V8) |
(1ULL << ARM_FEATURE_NEON) |
(1ULL << ARM_FEATURE_AARCH64) |
@@ -897,7 +902,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
r |= hv_vcpu_destroy(fd);
- clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar.id_aa64mmfr0);
+ clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar);
/*
* Disable SME, which is not properly handled by QEMU hvf yet.
@@ -909,7 +914,8 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* - fix any assumptions we made that SME implies SVE (since
* on the M4 there is SME but not SVE)
*/
- host_isar.id_aa64pfr1 &= ~R_ID_AA64PFR1_SME_MASK;
+ SET_IDREG(&host_isar, ID_AA64PFR1,
+ GET_IDREG(&host_isar, ID_AA64PFR1) & ~R_ID_AA64PFR1_SME_MASK);
ahcf->isar = host_isar;
@@ -926,7 +932,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
ahcf->reset_sctlr |= 0x00800000;
/* Make sure we don't advertise AArch32 support for EL0/EL1 */
- if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
+ if ((GET_IDREG(&host_isar, ID_AA64PFR0) & 0xff) != 0x11) {
return false;
}
@@ -1064,12 +1070,12 @@ int hvf_arch_init_vcpu(CPUState *cpu)
/* We're limited to underlying hardware caps, override internal versions */
ret = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
- &arm_cpu->isar.id_aa64mmfr0);
+ &arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]);
assert_hvf_ok(ret);
- clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar.id_aa64mmfr0);
+ clamp_id_aa64mmfr0_parange_to_ipa_size(&arm_cpu->isar);
ret = hv_vcpu_set_sys_reg(cpu->accel->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
- arm_cpu->isar.id_aa64mmfr0);
+ arm_cpu->isar.idregs[ID_AA64MMFR0_EL1_IDX]);
assert_hvf_ok(ret);
return 0;
@@ -1082,13 +1088,13 @@ void hvf_kick_vcpu_thread(CPUState *cpu)
}
static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
- uint32_t syndrome)
+ uint32_t syndrome, int target_el)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
CPUARMState *env = &arm_cpu->env;
cpu->exception_index = excp;
- env->exception.target_el = 1;
+ env->exception.target_el = target_el;
env->exception.syndrome = syndrome;
arm_cpu_do_interrupt(cpu);
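
hvf_raise_exception() previously hardcoded the target exception level to EL1; making it a parameter moves that decision to the call sites (all of which still pass 1 in this patch) and leaves room for injecting to other ELs later. The shape of the change as a toy stand-alone sketch (EXCP_UDEF and syn_uncategorized() are QEMU names, mocked here with placeholder values):

    enum { EXCP_UDEF = 1 };

    /* Placeholder: QEMU's syn_uncategorized() builds a real ESR syndrome. */
    static unsigned syn_uncategorized(void) { return 0; }

    struct toy_env { int target_el; unsigned syndrome; int excp; };

    static void raise_exception(struct toy_env *env, int excp,
                                unsigned syndrome, int target_el)
    {
        env->excp = excp;
        env->target_el = target_el;   /* was a hardcoded 1 before the change */
        env->syndrome = syndrome;
    }

    int main(void)
    {
        struct toy_env env;
        raise_exception(&env, EXCP_UDEF, syn_uncategorized(), 1 /* EL1 */);
        return env.target_el == 1 ? 0 : 1;
    }
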
@@ -1448,7 +1454,7 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val)
SYSREG_CRN(reg),
SYSREG_CRM(reg),
SYSREG_OP2(reg));
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
return 1;
}
@@ -1758,7 +1764,7 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
SYSREG_CRN(reg),
SYSREG_CRM(reg),
SYSREG_OP2(reg));
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
return 1;
}
@@ -1909,7 +1915,17 @@ int hvf_vcpu_exec(CPUState *cpu)
flush_cpu_state(cpu);
bql_unlock();
- assert_hvf_ok(hv_vcpu_run(cpu->accel->fd));
+ r = hv_vcpu_run(cpu->accel->fd);
+ bql_lock();
+ switch (r) {
+ case HV_SUCCESS:
+ break;
+ case HV_ILLEGAL_GUEST_STATE:
+ trace_hvf_illegal_guest_state();
+ /* fall through */
+ default:
+ g_assert_not_reached();
+ }
/* handle VMEXIT */
uint64_t exit_reason = hvf_exit->reason;
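
Two behavioural fixes are folded into this hunk: the BQL is now taken immediately after hv_vcpu_run() rather than further down (the later bql_lock() is deleted in the next hunk), and the run call's return value is actually inspected, with HV_ILLEGAL_GUEST_STATE traced before the assertion fires. A stand-alone sketch of the status check (hv_return_t and the HV_* names are real Hypervisor.framework identifiers, mocked here with placeholder values so the sketch compiles anywhere):

    #include <stdio.h>
    #include <stdlib.h>

    typedef int hv_return_t;
    enum { HV_SUCCESS = 0, HV_ILLEGAL_GUEST_STATE = 1 };  /* placeholder values */

    static void check_run_result(hv_return_t r)
    {
        switch (r) {
        case HV_SUCCESS:
            break;            /* normal: go decode the VM exit reason */
        case HV_ILLEGAL_GUEST_STATE:
            fprintf(stderr, "trace: hvf_illegal_guest_state\n");
            /* fall through -- any non-success result is a QEMU bug */
        default:
            abort();
        }
    }

    int main(void)
    {
        check_run_result(HV_SUCCESS);
        return 0;
    }
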
@@ -1917,7 +1933,6 @@ int hvf_vcpu_exec(CPUState *cpu)
uint32_t ec = syn_get_ec(syndrome);
ret = 0;
- bql_lock();
switch (exit_reason) {
case HV_EXIT_REASON_EXCEPTION:
/* This is the main one, handle below. */
@@ -1952,7 +1967,7 @@ int hvf_vcpu_exec(CPUState *cpu)
if (!hvf_find_sw_breakpoint(cpu, env->pc)) {
/* Re-inject into the guest */
ret = 0;
- hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0));
+ hvf_raise_exception(cpu, EXCP_BKPT, syn_aa64_bkpt(0), 1);
}
break;
}
@@ -2057,13 +2072,13 @@ int hvf_vcpu_exec(CPUState *cpu)
cpu_synchronize_state(cpu);
if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
if (!hvf_handle_psci_call(cpu)) {
- trace_hvf_unknown_hvc(env->xregs[0]);
+ trace_hvf_unknown_hvc(env->pc, env->xregs[0]);
/* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
env->xregs[0] = -1;
}
} else {
- trace_hvf_unknown_hvc(env->xregs[0]);
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ trace_hvf_unknown_hvc(env->pc, env->xregs[0]);
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
}
break;
case EC_AA64_SMC:
@@ -2078,7 +2093,7 @@ int hvf_vcpu_exec(CPUState *cpu)
}
} else {
trace_hvf_unknown_smc(env->xregs[0]);
- hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
+ hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
}
break;
default:
@@ -2277,28 +2292,23 @@ static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}
-static void hvf_arch_set_traps(void)
+static void hvf_arch_set_traps(CPUState *cpu)
{
- CPUState *cpu;
bool should_enable_traps = false;
hv_return_t r = HV_SUCCESS;
     /* Check whether guest debugging is enabled for this vCPU; if it is,
      * make debug exceptions and debug register accesses exit the guest */
- CPU_FOREACH(cpu) {
- should_enable_traps |= cpu->accel->guest_debug_enabled;
- }
- CPU_FOREACH(cpu) {
- /* Set whether debug exceptions exit the guest */
- r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
- should_enable_traps);
- assert_hvf_ok(r);
+ should_enable_traps |= cpu->accel->guest_debug_enabled;
+ /* Set whether debug exceptions exit the guest */
+ r = hv_vcpu_set_trap_debug_exceptions(cpu->accel->fd,
+ should_enable_traps);
+ assert_hvf_ok(r);
- /* Set whether accesses to debug registers exit the guest */
- r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
- should_enable_traps);
- assert_hvf_ok(r);
- }
+ /* Set whether accesses to debug registers exit the guest */
+ r = hv_vcpu_set_trap_debug_reg_accesses(cpu->accel->fd,
+ should_enable_traps);
+ assert_hvf_ok(r);
}
void hvf_arch_update_guest_debug(CPUState *cpu)
@@ -2339,7 +2349,7 @@ void hvf_arch_update_guest_debug(CPUState *cpu)
deposit64(env->cp15.mdscr_el1, MDSCR_EL1_MDE_SHIFT, 1, 0);
}
- hvf_arch_set_traps();
+ hvf_arch_set_traps(cpu);
}
bool hvf_arch_supports_guest_debug(void)
diff --git a/target/arm/hvf/trace-events b/target/arm/hvf/trace-events
index 4fbbe4b..b49746f 100644
--- a/target/arm/hvf/trace-events
+++ b/target/arm/hvf/trace-events
@@ -5,9 +5,10 @@ hvf_inject_irq(void) "injecting IRQ"
hvf_data_abort(uint64_t pc, uint64_t va, uint64_t pa, bool isv, bool iswrite, bool s1ptw, uint32_t len, uint32_t srt) "data abort: [pc=0x%"PRIx64" va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]"
hvf_sysreg_read(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg read 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d) = 0x%016"PRIx64
hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg write 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d, val=0x%016"PRIx64")"
-hvf_unknown_hvc(uint64_t x0) "unknown HVC! 0x%016"PRIx64
+hvf_unknown_hvc(uint64_t pc, uint64_t x0) "pc=0x%"PRIx64" unknown HVC! 0x%016"PRIx64
hvf_unknown_smc(uint64_t x0) "unknown SMC! 0x%016"PRIx64
hvf_exit(uint64_t syndrome, uint32_t ec, uint64_t pc) "exit: 0x%"PRIx64" [ec=0x%x pc=0x%"PRIx64"]"
-hvf_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpu=0x%x"
+hvf_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpuid=0x%x"
hvf_vgic_write(const char *name, uint64_t val) "vgic write to %s [val=0x%016"PRIx64"]"
hvf_vgic_read(const char *name, uint64_t val) "vgic read from %s [val=0x%016"PRIx64"]"
+hvf_illegal_guest_state(void) "HV_ILLEGAL_GUEST_STATE"
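
Each line in a trace-events file is a declaration that tracetool turns into a trace_<name>() inline function, so widening hvf_unknown_hvc to take the PC is what forces the two call-site updates in hvf.c above. Roughly the shape of the generated code (heavily simplified; the real output also checks the event's enabled state and dispatches to the configured trace backend):

    #include <inttypes.h>
    #include <stdio.h>

    static inline void trace_hvf_unknown_hvc(uint64_t pc, uint64_t x0)
    {
        printf("hvf_unknown_hvc pc=0x%" PRIx64 " unknown HVC! 0x%016" PRIx64 "\n",
               pc, x0);
    }

    int main(void)
    {
        trace_hvf_unknown_hvc(0x40001000, 0x84000008);  /* sample values */
        return 0;
    }
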
diff --git a/target/arm/hvf_arm.h b/target/arm/hvf_arm.h
index 26c717b..ea82f26 100644
--- a/target/arm/hvf_arm.h
+++ b/target/arm/hvf_arm.h
@@ -11,7 +11,7 @@
#ifndef QEMU_HVF_ARM_H
#define QEMU_HVF_ARM_H
-#include "cpu.h"
+#include "target/arm/cpu-qom.h"
/**
* hvf_arm_init_debug() - initialize guest debug capabilities
@@ -22,23 +22,7 @@ void hvf_arm_init_debug(void);
void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu);
-#ifdef CONFIG_HVF
-
uint32_t hvf_arm_get_default_ipa_bit_size(void);
uint32_t hvf_arm_get_max_ipa_bit_size(void);
-#else
-
-static inline uint32_t hvf_arm_get_default_ipa_bit_size(void)
-{
- return 0;
-}
-
-static inline uint32_t hvf_arm_get_max_ipa_bit_size(void)
-{
- return 0;
-}
-
-#endif
-
#endif
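
Note how this header change and the new hvf-stub.c earlier in the patch are two halves of one cleanup: the prototypes are now declared unconditionally, and the CONFIG_HVF distinction is resolved at link time by the stub (see the sketch after the hvf-stub.c diff) instead of by inline fallbacks that returned a fake 0.
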
diff --git a/target/arm/hyp_gdbstub.c b/target/arm/hyp_gdbstub.c
index 1e86126..bb59697 100644
--- a/target/arm/hyp_gdbstub.c
+++ b/target/arm/hyp_gdbstub.c
@@ -54,7 +54,7 @@ GArray *hw_breakpoints, *hw_watchpoints;
* here so future PC comparisons will work properly.
*/
-int insert_hw_breakpoint(target_ulong addr)
+int insert_hw_breakpoint(vaddr addr)
{
HWBreakpoint brk = {
.bcr = 0x1, /* BCR E=1, enable */
@@ -80,7 +80,7 @@ int insert_hw_breakpoint(target_ulong addr)
* Delete a breakpoint and shuffle any above down
*/
-int delete_hw_breakpoint(target_ulong pc)
+int delete_hw_breakpoint(vaddr pc)
{
int i;
for (i = 0; i < hw_breakpoints->len; i++) {
@@ -125,7 +125,7 @@ int delete_hw_breakpoint(target_ulong pc)
* need to ensure you mask the address as required and set BAS=0xff
*/
-int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type)
+int insert_hw_watchpoint(vaddr addr, vaddr len, int type)
{
HWWatchpoint wp = {
.wcr = R_DBGWCR_E_MASK, /* E=1, enable */
@@ -182,7 +182,7 @@ int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type)
return 0;
}
-bool check_watchpoint_in_range(int i, target_ulong addr)
+bool check_watchpoint_in_range(int i, vaddr addr)
{
HWWatchpoint *wp = get_hw_wp(i);
uint64_t addr_top, addr_bottom = wp->wvr;
@@ -214,7 +214,7 @@ bool check_watchpoint_in_range(int i, target_ulong addr)
* Delete a breakpoint and shuffle any above down
*/
-int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type)
+int delete_hw_watchpoint(vaddr addr, vaddr len, int type)
{
int i;
for (i = 0; i < cur_hw_wps; i++) {
@@ -226,7 +226,7 @@ int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type)
return -ENOENT;
}
-bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
+bool find_hw_breakpoint(CPUState *cpu, vaddr pc)
{
int i;
@@ -239,7 +239,7 @@ bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
return false;
}
-CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
+CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, vaddr addr)
{
int i;
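target_ulong is sized to the target build (32 or 64 bits), while vaddr is a fixed 64-bit type, so switching these prototypes to vaddr lets the file compile identically for arm and aarch64. A one-line illustration using QEMU's own definitions:

#include "qemu/osdep.h"
#include "exec/vaddr.h"

QEMU_BUILD_BUG_ON(sizeof(vaddr) != 8);   /* vaddr is 64-bit on every target */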
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 28585c0..c4765e4 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -25,9 +25,13 @@
#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H
+#include "exec/hwaddr.h"
+#include "exec/vaddr.h"
#include "exec/breakpoint.h"
+#include "accel/tcg/tb-cpu-state.h"
#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
+#include "system/memory.h"
#include "syndrome.h"
#include "cpu-features.h"
@@ -350,7 +354,6 @@ static inline int r14_bank_number(int mode)
}
void arm_cpu_register(const ARMCPUInfo *info);
-void aarch64_cpu_register(const ARMCPUInfo *info);
void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);
@@ -369,10 +372,12 @@ void arm_restore_state_to_opc(CPUState *cs,
const uint64_t *data);
#ifdef CONFIG_TCG
+TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs);
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
/* Our implementation of TCGCPUOps::cpu_exec_halt */
bool arm_cpu_exec_halt(CPUState *cs);
+int arm_cpu_mmu_index(CPUState *cs, bool ifetch);
#endif /* CONFIG_TCG */
typedef enum ARMFPRounding {
@@ -645,16 +650,12 @@ static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
return false;
}
-static inline void arm_handle_psci_call(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
+#endif
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
-#endif
/**
* arm_clear_exclusive: clear the exclusive monitor
@@ -724,8 +725,8 @@ typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
ARMFaultType type;
ARMGPCF gpcf;
- target_ulong s2addr;
- target_ulong paddr;
+ hwaddr s2addr;
+ hwaddr paddr;
ARMSecuritySpace paddr_space;
int level;
int domain;
@@ -1170,7 +1171,7 @@ static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
static inline int arm_num_brps(ARMCPU *cpu)
{
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
- return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
+ return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, BRPS) + 1;
} else {
return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
}
@@ -1184,7 +1185,7 @@ static inline int arm_num_brps(ARMCPU *cpu)
static inline int arm_num_wrps(ARMCPU *cpu)
{
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
- return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
+ return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, WRPS) + 1;
} else {
return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
}
@@ -1198,7 +1199,7 @@ static inline int arm_num_wrps(ARMCPU *cpu)
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
- return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
+ return FIELD_EX64_IDREG(&cpu->isar, ID_AA64DFR0, CTX_CMPS) + 1;
} else {
return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
}
@@ -1806,7 +1807,6 @@ static inline uint64_t pmu_counter_mask(CPUARMState *env)
return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
}
-#ifdef TARGET_AARCH64
GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg);
int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg);
int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg);
@@ -1824,7 +1824,12 @@ void aarch64_max_tcg_initfn(Object *obj);
void aarch64_add_pauth_properties(Object *obj);
void aarch64_add_sve_properties(Object *obj);
void aarch64_add_sme_properties(Object *obj);
-#endif
+
+/* Return true if the gdbstub is presenting an AArch64 CPU */
+static inline bool arm_gdbstub_is_aarch64(ARMCPU *cpu)
+{
+ return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
+}
/* Read the CONTROL register as the MRS instruction would. */
uint32_t arm_v7m_mrs_control(CPUARMState *env, uint32_t secure);
@@ -1866,6 +1871,10 @@ void define_debug_regs(ARMCPU *cpu);
/* Add the cpreg definitions for TLBI instructions */
void define_tlb_insn_regs(ARMCPU *cpu);
+/* Add the cpreg definitions for AT instructions */
+void define_at_insn_regs(ARMCPU *cpu);
+/* Add the cpreg definitions for PM cpregs */
+void define_pm_cpregs(ARMCPU *cpu);
/* Effective value of MDCR_EL2 */
static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env)
@@ -1898,8 +1907,6 @@ static inline bool arm_fgt_active(CPUARMState *env, int el)
(!arm_feature(env, ARM_FEATURE_EL3) || (env->cp15.scr_el3 & SCR_FGTEN));
}
-void assert_hflags_rebuild_correctly(CPUARMState *env);
-
/*
* Although the ARM implementation of hardware assisted debugging
* allows for different breakpoints per-core, the current GDB
@@ -1941,14 +1948,14 @@ extern GArray *hw_breakpoints, *hw_watchpoints;
#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))
-bool find_hw_breakpoint(CPUState *cpu, target_ulong pc);
-int insert_hw_breakpoint(target_ulong pc);
-int delete_hw_breakpoint(target_ulong pc);
+bool find_hw_breakpoint(CPUState *cpu, vaddr pc);
+int insert_hw_breakpoint(vaddr pc);
+int delete_hw_breakpoint(vaddr pc);
-bool check_watchpoint_in_range(int i, target_ulong addr);
-CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr);
-int insert_hw_watchpoint(target_ulong addr, target_ulong len, int type);
-int delete_hw_watchpoint(target_ulong addr, target_ulong len, int type);
+bool check_watchpoint_in_range(int i, vaddr addr);
+CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, vaddr addr);
+int insert_hw_watchpoint(vaddr addr, vaddr len, int type);
+int delete_hw_watchpoint(vaddr addr, vaddr len, int type);
/* Return the current value of the system counter in ticks */
uint64_t gt_get_countervalue(CPUARMState *env);
@@ -1978,5 +1985,6 @@ void vfp_clear_float_status_exc_flags(CPUARMState *env);
* specified by mask changing to the values in val.
*/
void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask);
+bool arm_pan_enabled(CPUARMState *env);
#endif
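The FIELD_EX64_IDREG() conversions above are part of a wider refactor that moves the CPU ID registers into an isar.idregs[] array indexed by *_EL1_IDX constants. The real accessors live in cpu-sysregs.h; inferred from the call sites in this patch, they look roughly like this sketch:

/* Sketch only -- see cpu-sysregs.h for the actual definitions. */
#define IDREG(isar, REG)            ((isar)->idregs[REG ## _EL1_IDX])
#define SET_IDREG(isar, REG, value) (IDREG(isar, REG) = (value))
#define FIELD_EX64_IDREG(isar, REG, FIELD) \
    FIELD_EX64(IDREG(isar, REG), REG, FIELD)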
diff --git a/target/arm/kvm-stub.c b/target/arm/kvm-stub.c
index 965a486..c93462c 100644
--- a/target/arm/kvm-stub.c
+++ b/target/arm/kvm-stub.c
@@ -22,3 +22,105 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level)
{
g_assert_not_reached();
}
+
+/*
+ * It's safe to call these functions without KVM support.
+ * They should either do nothing or return "not supported".
+ */
+bool kvm_arm_aarch32_supported(void)
+{
+ return false;
+}
+
+bool kvm_arm_pmu_supported(void)
+{
+ return false;
+}
+
+bool kvm_arm_sve_supported(void)
+{
+ return false;
+}
+
+bool kvm_arm_mte_supported(void)
+{
+ return false;
+}
+
+bool kvm_arm_el2_supported(void)
+{
+ return false;
+}
+
+/*
+ * These functions should never actually be called without KVM support.
+ */
+void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
+{
+ g_assert_not_reached();
+}
+
+int kvm_arm_vgic_probe(void)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_pmu_init(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_reset_vcpu(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level)
+{
+ g_assert_not_reached();
+}
+
+void kvm_arm_cpu_pre_save(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
+
+bool kvm_arm_cpu_post_load(ARMCPU *cpu)
+{
+ g_assert_not_reached();
+}
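The split matters for callers: the capability probes are safe to call from any build, while the rest must stay behind kvm_enabled(). A sketch of the intended call pattern (illustrative, not a specific call site):

if (kvm_arm_sve_supported()) {        /* stub simply returns false */
    /* enable SVE-specific handling */
}
if (kvm_enabled()) {
    kvm_arm_add_vcpu_properties(cpu); /* stub would abort if reached */
}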
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index da30bdb..6672344 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -26,11 +26,12 @@
#include "system/kvm_int.h"
#include "kvm_arm.h"
#include "cpu.h"
+#include "cpu-sysregs.h"
#include "trace.h"
#include "internals.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "gdbstub/enums.h"
#include "hw/boards.h"
#include "hw/irq.h"
@@ -100,8 +101,7 @@ static int kvm_arm_vcpu_finalize(ARMCPU *cpu, int feature)
return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_FINALIZE, &feature);
}
-bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
- int *fdarray,
+bool kvm_arm_create_scratch_host_vcpu(int *fdarray,
struct kvm_vcpu_init *init)
{
int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
@@ -150,40 +150,13 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
struct kvm_vcpu_init preferred;
ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred);
- if (!ret) {
- init->target = preferred.target;
- }
- }
- if (ret >= 0) {
- ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
if (ret < 0) {
goto err;
}
- } else if (cpus_to_try) {
- /* Old kernel which doesn't know about the
- * PREFERRED_TARGET ioctl: we know it will only support
- * creating one kind of guest CPU which is its preferred
- * CPU type.
- */
- struct kvm_vcpu_init try;
-
- while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
- try.target = *cpus_to_try++;
- memcpy(try.features, init->features, sizeof(init->features));
- ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try);
- if (ret >= 0) {
- break;
- }
- }
- if (ret < 0) {
- goto err;
- }
- init->target = try.target;
- } else {
- /* Treat a NULL cpus_to_try argument the same as an empty
- * list, which means we will fail the call since this must
- * be an old kernel which doesn't support PREFERRED_TARGET.
- */
+ init->target = preferred.target;
+ }
+ ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
+ if (ret < 0) {
goto err;
}
@@ -246,6 +219,29 @@ static bool kvm_arm_pauth_supported(void)
kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC));
}
+
+static uint64_t idregs_sysreg_to_kvm_reg(ARMSysRegs sysreg)
+{
+ return ARM64_SYS_REG((sysreg & CP_REG_ARM64_SYSREG_OP0_MASK) >> CP_REG_ARM64_SYSREG_OP0_SHIFT,
+ (sysreg & CP_REG_ARM64_SYSREG_OP1_MASK) >> CP_REG_ARM64_SYSREG_OP1_SHIFT,
+ (sysreg & CP_REG_ARM64_SYSREG_CRN_MASK) >> CP_REG_ARM64_SYSREG_CRN_SHIFT,
+ (sysreg & CP_REG_ARM64_SYSREG_CRM_MASK) >> CP_REG_ARM64_SYSREG_CRM_SHIFT,
+ (sysreg & CP_REG_ARM64_SYSREG_OP2_MASK) >> CP_REG_ARM64_SYSREG_OP2_SHIFT);
+}
+
+/* Read a sysreg value and store it in the idregs array */
+static int get_host_cpu_reg(int fd, ARMHostCPUFeatures *ahcf,
+ ARMIDRegisterIdx index)
+{
+ uint64_t *reg;
+ int ret;
+
+ reg = &ahcf->isar.idregs[index];
+ ret = read_sys_reg64(fd, reg,
+ idregs_sysreg_to_kvm_reg(id_register_sysreg[index]));
+ return ret;
+}
+
static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
/* Identify the feature bits corresponding to the host CPU, and
@@ -255,21 +251,11 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
*/
int fdarray[3];
bool sve_supported;
+ bool el2_supported;
bool pmu_supported = false;
uint64_t features = 0;
int err;
- /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
- * we know these will only support creating one kind of guest CPU,
- * which is its preferred CPU type. Fortunately these old kernels
- * support only a very limited number of CPUs.
- */
- static const uint32_t cpus_to_try[] = {
- KVM_ARM_TARGET_AEM_V8,
- KVM_ARM_TARGET_FOUNDATION_V8,
- KVM_ARM_TARGET_CORTEX_A57,
- QEMU_KVM_ARM_TARGET_NONE
- };
/*
* target = -1 informs kvm_arm_create_scratch_host_vcpu()
* to use the preferred target
@@ -286,6 +272,14 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
}
/*
+ * Ask for EL2 if supported.
+ */
+ el2_supported = kvm_arm_el2_supported();
+ if (el2_supported) {
+ init.features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2;
+ }
+
+ /*
* Ask for Pointer Authentication if supported, so that we get
* the unsanitized field values for AA64ISAR1_EL1.
*/
@@ -300,15 +294,15 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
features |= 1ULL << ARM_FEATURE_PMU;
}
- if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
+ if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
return false;
}
ahcf->target = init.target;
- ahcf->dtb_compatible = "arm,arm-v8";
+ ahcf->dtb_compatible = "arm,armv8";
+ int fd = fdarray[2];
- err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
- ARM64_SYS_REG(3, 0, 0, 4, 0));
+ err = get_host_cpu_reg(fd, ahcf, ID_AA64PFR0_EL1_IDX);
if (unlikely(err < 0)) {
/*
* Before v4.15, the kernel only exposed a limited number of system
@@ -326,31 +320,20 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* ??? Either of these sounds like too much effort just
* to work around running a modern host kernel.
*/
- ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
+ SET_IDREG(&ahcf->isar, ID_AA64PFR0, 0x00000011); /* EL1&0, AArch64 only */
err = 0;
} else {
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
- ARM64_SYS_REG(3, 0, 0, 4, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
- ARM64_SYS_REG(3, 0, 0, 4, 5));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
- ARM64_SYS_REG(3, 0, 0, 5, 0));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
- ARM64_SYS_REG(3, 0, 0, 5, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
- ARM64_SYS_REG(3, 0, 0, 6, 0));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
- ARM64_SYS_REG(3, 0, 0, 6, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2,
- ARM64_SYS_REG(3, 0, 0, 6, 2));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
- ARM64_SYS_REG(3, 0, 0, 7, 0));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
- ARM64_SYS_REG(3, 0, 0, 7, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
- ARM64_SYS_REG(3, 0, 0, 7, 2));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr3,
- ARM64_SYS_REG(3, 0, 0, 7, 3));
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64PFR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64SMFR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64DFR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64DFR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR2_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR2_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR3_EL1_IDX);
/*
* Note that if AArch32 support is not present in the host,
@@ -359,49 +342,31 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* than skipping the reads and leaving 0, as we must avoid
* considering the values in every case.
*/
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
- ARM64_SYS_REG(3, 0, 0, 1, 0));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
- ARM64_SYS_REG(3, 0, 0, 1, 1));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
- ARM64_SYS_REG(3, 0, 0, 1, 2));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
- ARM64_SYS_REG(3, 0, 0, 1, 4));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
- ARM64_SYS_REG(3, 0, 0, 1, 5));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
- ARM64_SYS_REG(3, 0, 0, 1, 6));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
- ARM64_SYS_REG(3, 0, 0, 1, 7));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
- ARM64_SYS_REG(3, 0, 0, 2, 0));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
- ARM64_SYS_REG(3, 0, 0, 2, 1));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
- ARM64_SYS_REG(3, 0, 0, 2, 2));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
- ARM64_SYS_REG(3, 0, 0, 2, 3));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
- ARM64_SYS_REG(3, 0, 0, 2, 4));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
- ARM64_SYS_REG(3, 0, 0, 2, 5));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
- ARM64_SYS_REG(3, 0, 0, 2, 6));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
- ARM64_SYS_REG(3, 0, 0, 2, 7));
-
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
+ err |= get_host_cpu_reg(fd, ahcf, ID_PFR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_PFR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_DFR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_MMFR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_MMFR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_MMFR2_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_MMFR3_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR0_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR2_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR3_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR4_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR5_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_ISAR6_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_MMFR4_EL1_IDX);
+
+ err |= read_sys_reg32(fd, &ahcf->isar.mvfr0,
ARM64_SYS_REG(3, 0, 0, 3, 0));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
+ err |= read_sys_reg32(fd, &ahcf->isar.mvfr1,
ARM64_SYS_REG(3, 0, 0, 3, 1));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
+ err |= read_sys_reg32(fd, &ahcf->isar.mvfr2,
ARM64_SYS_REG(3, 0, 0, 3, 2));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
- ARM64_SYS_REG(3, 0, 0, 3, 4));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1,
- ARM64_SYS_REG(3, 0, 0, 3, 5));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5,
- ARM64_SYS_REG(3, 0, 0, 3, 6));
+ err |= get_host_cpu_reg(fd, ahcf, ID_PFR2_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_DFR1_EL1_IDX);
+ err |= get_host_cpu_reg(fd, ahcf, ID_MMFR5_EL1_IDX);
/*
* DBGDIDR is a bit complicated because the kernel doesn't
@@ -413,14 +378,14 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
* We only do this if the CPU supports AArch32 at EL1.
*/
- if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
- int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
- int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
+ if (FIELD_EX32_IDREG(&ahcf->isar, ID_AA64PFR0, EL1) >= 2) {
+ int wrps = FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, WRPS);
+ int brps = FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, BRPS);
int ctx_cmps =
- FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
+ FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, CTX_CMPS);
int version = 6; /* ARMv8 debug architecture */
bool has_el3 =
- !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
+ !!FIELD_EX32_IDREG(&ahcf->isar, ID_AA64PFR0, EL3);
uint32_t dbgdidr = 0;
dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
@@ -435,7 +400,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
if (pmu_supported) {
/* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
+ err |= read_sys_reg64(fd, &ahcf->isar.reset_pmcr_el0,
ARM64_SYS_REG(3, 3, 9, 12, 0));
}
@@ -447,8 +412,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
* enabled SVE support, which resulted in an error rather than RAZ.
* So only read the register if we set KVM_ARM_VCPU_SVE above.
*/
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
- ARM64_SYS_REG(3, 0, 0, 4, 4));
+ err |= get_host_cpu_reg(fd, ahcf, ID_AA64ZFR0_EL1_IDX);
}
}
@@ -468,6 +432,10 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
features |= 1ULL << ARM_FEATURE_AARCH64;
features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;
+ if (el2_supported) {
+ features |= 1ULL << ARM_FEATURE_EL2;
+ }
+
ahcf->features = features;
return true;
@@ -977,13 +945,24 @@ void kvm_arm_cpu_pre_save(ARMCPU *cpu)
}
}
-void kvm_arm_cpu_post_load(ARMCPU *cpu)
+bool kvm_arm_cpu_post_load(ARMCPU *cpu)
{
+ if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
+ return false;
+ }
+ /* Note that it's OK for the TCG side not to know about
+ * every register in the list; KVM is authoritative if
+ * we're using it.
+ */
+ write_list_to_cpustate(cpu);
+
/* KVM virtual time adjustment */
if (cpu->kvm_adjvtime) {
cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT);
cpu->kvm_vtime_dirty = true;
}
+
+ return true;
}
void kvm_arm_reset_vcpu(ARMCPU *cpu)
@@ -1797,6 +1776,11 @@ bool kvm_arm_aarch32_supported(void)
return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
}
+bool kvm_arm_el2_supported(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL2);
+}
+
bool kvm_arm_sve_supported(void)
{
return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
@@ -1835,7 +1819,7 @@ uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
probed = true;
- if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
+ if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
error_report("failed to create scratch VCPU with SVE enabled");
abort();
}
@@ -1874,6 +1858,11 @@ static int kvm_arm_sve_set_vls(ARMCPU *cpu)
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
int ret;
@@ -1882,8 +1871,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
CPUARMState *env = &cpu->env;
uint64_t psciver;
- if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
- !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
+ if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
error_report("KVM is not supported for this guest CPU type");
return -EINVAL;
}
@@ -1913,6 +1901,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
}
+ if (cpu->has_el2 && kvm_arm_el2_supported()) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2;
+ }
/* Do KVM_ARM_VCPU_INIT ioctl */
ret = kvm_arm_vcpu_init(cpu);
@@ -2468,3 +2459,32 @@ void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
cpu->kvm_mte = true;
}
}
+
+void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level)
+{
+ ARMCPU *cpu = arm_cpu;
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ uint32_t linestate_bit;
+ int irq_id;
+
+ switch (irq) {
+ case ARM_CPU_IRQ:
+ irq_id = KVM_ARM_IRQ_CPU_IRQ;
+ linestate_bit = CPU_INTERRUPT_HARD;
+ break;
+ case ARM_CPU_FIQ:
+ irq_id = KVM_ARM_IRQ_CPU_FIQ;
+ linestate_bit = CPU_INTERRUPT_FIQ;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (level) {
+ env->irq_line_state |= linestate_bit;
+ } else {
+ env->irq_line_state &= ~linestate_bit;
+ }
+ kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
+}
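idregs_sysreg_to_kvm_reg() only repacks the op0/op1/CRn/CRm/op2 encoding into KVM's register-ID format, so each new get_host_cpu_reg() call fetches exactly the register the deleted read_sys_reg64() lines named. A worked check for the first conversion (the encoding is taken from the removed line above):

/* ID_AA64PFR0_EL1 is op0=3, op1=0, CRn=0, CRm=4, op2=0: */
uint64_t kvm_id = idregs_sysreg_to_kvm_reg(id_register_sysreg[ID_AA64PFR0_EL1_IDX]);
g_assert(kvm_id == ARM64_SYS_REG(3, 0, 0, 4, 0));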
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index 05c3de8..b4cad05 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -12,6 +12,7 @@
#define QEMU_KVM_ARM_H
#include "system/kvm.h"
+#include "target/arm/cpu-qom.h"
#define KVM_ARM_VGIC_V2 (1 << 0)
#define KVM_ARM_VGIC_V3 (1 << 1)
@@ -83,8 +84,10 @@ void kvm_arm_cpu_pre_save(ARMCPU *cpu);
* @cpu: ARMCPU
*
* Called from cpu_post_load() to update KVM CPU state from the cpreg list.
+ *
+ * Returns: true on success, or false if write_list_to_kvmstate failed.
*/
-void kvm_arm_cpu_post_load(ARMCPU *cpu);
+bool kvm_arm_cpu_post_load(ARMCPU *cpu);
/**
* kvm_arm_reset_vcpu:
@@ -94,13 +97,9 @@ void kvm_arm_cpu_post_load(ARMCPU *cpu);
*/
void kvm_arm_reset_vcpu(ARMCPU *cpu);
-#ifdef CONFIG_KVM
+struct kvm_vcpu_init;
/**
* kvm_arm_create_scratch_host_vcpu:
- * @cpus_to_try: array of QEMU_KVM_ARM_TARGET_* values (terminated with
- * QEMU_KVM_ARM_TARGET_NONE) to try as fallback if the kernel does not
- * know the PREFERRED_TARGET ioctl. Passing NULL is the same as passing
- * an empty array.
* @fdarray: filled in with kvmfd, vmfd, cpufd file descriptors in that order
* @init: filled in with the necessary values for creating a host
* vcpu. If NULL is provided, will not init the vCPU (though the cpufd
@@ -113,8 +112,7 @@ void kvm_arm_reset_vcpu(ARMCPU *cpu);
* Returns: true on success (and fdarray and init are filled in),
* false on failure (and fdarray and init are not valid).
*/
-bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
- int *fdarray,
+bool kvm_arm_create_scratch_host_vcpu(int *fdarray,
struct kvm_vcpu_init *init);
/**
@@ -194,6 +192,13 @@ bool kvm_arm_sve_supported(void);
bool kvm_arm_mte_supported(void);
/**
+ * kvm_arm_el2_supported:
+ *
+ * Returns true if KVM can enable EL2 and false otherwise.
+ */
+bool kvm_arm_el2_supported(void);
+
+/**
* kvm_arm_get_max_vm_ipa_size:
* @ms: Machine state handle
* @fixed_ipa: True when the IPA limit is fixed at 40. This is the case
@@ -221,85 +226,6 @@ int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
void kvm_arm_enable_mte(Object *cpuobj, Error **errp);
-#else
-
-/*
- * It's safe to call these functions without KVM support.
- * They should either do nothing or return "not supported".
- */
-static inline bool kvm_arm_aarch32_supported(void)
-{
- return false;
-}
-
-static inline bool kvm_arm_pmu_supported(void)
-{
- return false;
-}
-
-static inline bool kvm_arm_sve_supported(void)
-{
- return false;
-}
-
-static inline bool kvm_arm_mte_supported(void)
-{
- return false;
-}
-
-/*
- * These functions should never actually be called without KVM support.
- */
-static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
-
-static inline int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa)
-{
- g_assert_not_reached();
-}
-
-static inline int kvm_arm_vgic_probe(void)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_pmu_init(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
-{
- g_assert_not_reached();
-}
-
-static inline uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
-{
- g_assert_not_reached();
-}
-
-static inline void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
-{
- g_assert_not_reached();
-}
-
-#endif
+void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level);
#endif
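With the fallback list gone, probing the host reduces to the pattern below (a sketch; kvm_arm_destroy_scratch_host_vcpu() is the existing cleanup counterpart):

struct kvm_vcpu_init init = { .target = -1 };  /* -1 = use preferred target */
int fdarray[3];

if (kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
    /* ... query registers through the vcpu fd, fdarray[2] ... */
    kvm_arm_destroy_scratch_host_vcpu(fdarray);
}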
diff --git a/target/arm/machine.c b/target/arm/machine.c
index 978249f..6986915 100644
--- a/target/arm/machine.c
+++ b/target/arm/machine.c
@@ -6,7 +6,8 @@
#include "kvm_arm.h"
#include "internals.h"
#include "cpu-features.h"
-#include "migration/cpu.h"
+#include "migration/qemu-file-types.h"
+#include "migration/vmstate.h"
#include "target/arm/gtimer.h"
static bool vfp_needed(void *opaque)
@@ -240,7 +241,6 @@ static const VMStateDescription vmstate_iwmmxt = {
}
};
-#ifdef TARGET_AARCH64
/* The expression ARM_MAX_VQ - 2 is 0 for a pure AArch32 build,
* and ARMPredicateReg is actively empty. This triggers errors
* in the expansion of the VMSTATE macros.
@@ -315,12 +315,30 @@ static const VMStateDescription vmstate_za = {
.minimum_version_id = 1,
.needed = za_needed,
.fields = (const VMStateField[]) {
- VMSTATE_STRUCT_ARRAY(env.zarray, ARMCPU, ARM_MAX_VQ * 16, 0,
+ VMSTATE_STRUCT_ARRAY(env.za_state.za, ARMCPU, ARM_MAX_VQ * 16, 0,
vmstate_vreg, ARMVectorReg),
VMSTATE_END_OF_LIST()
}
};
-#endif /* AARCH64 */
+
+static bool zt0_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ return za_needed(cpu) && cpu_isar_feature(aa64_sme2, cpu);
+}
+
+static const VMStateDescription vmstate_zt0 = {
+ .name = "cpu/zt0",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = zt0_needed,
+ .fields = (const VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.za_state.zt0, ARMCPU,
+ ARRAY_SIZE(((CPUARMState *)0)->za_state.zt0)),
+ VMSTATE_END_OF_LIST()
+ }
+};
static bool serror_needed(void *opaque)
{
@@ -977,15 +995,9 @@ static int cpu_post_load(void *opaque, int version_id)
}
if (kvm_enabled()) {
- if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
+ if (!kvm_arm_cpu_post_load(cpu)) {
return -1;
}
- /* Note that it's OK for the TCG side not to know about
- * every register in the list; KVM is authoritative if
- * we're using it.
- */
- write_list_to_cpustate(cpu);
- kvm_arm_cpu_post_load(cpu);
} else {
if (!write_list_to_cpustate(cpu)) {
return -1;
@@ -1101,10 +1113,9 @@ const VMStateDescription vmstate_arm_cpu = {
&vmstate_pmsav7,
&vmstate_pmsav8,
&vmstate_m_security,
-#ifdef TARGET_AARCH64
&vmstate_sve,
&vmstate_za,
-#endif
+ &vmstate_zt0,
&vmstate_serror,
&vmstate_irq_line_state,
&vmstate_wfxt_timer,
diff --git a/target/arm/meson.build b/target/arm/meson.build
index 3065081..07d9271 100644
--- a/target/arm/meson.build
+++ b/target/arm/meson.build
@@ -1,41 +1,60 @@
arm_ss = ss.source_set()
+arm_common_ss = ss.source_set()
arm_ss.add(files(
- 'cpu.c',
- 'debug_helper.c',
'gdbstub.c',
- 'helper.c',
- 'vfp_fpscr.c',
))
-arm_ss.add(zlib)
-
-arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c'), if_false: files('kvm-stub.c'))
-arm_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c'))
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
'cpu64.c',
- 'gdbstub64.c',
-))
+ 'gdbstub64.c'))
arm_system_ss = ss.source_set()
+arm_common_system_ss = ss.source_set()
arm_system_ss.add(files(
+ 'arm-qmp-cmds.c',
+))
+arm_system_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c'))
+arm_system_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c'))
+
+arm_user_ss = ss.source_set()
+arm_user_ss.add(files('cpu.c'))
+arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files(
+ 'cpu32-stubs.c',
+))
+arm_user_ss.add(files(
+ 'cpregs-pmu.c',
+ 'debug_helper.c',
+ 'helper.c',
+ 'vfp_fpscr.c',
+))
+
+arm_common_system_ss.add(files('cpu.c'))
+arm_common_system_ss.add(when: 'TARGET_AARCH64', if_false: files(
+ 'cpu32-stubs.c'))
+arm_common_system_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
+arm_common_system_ss.add(when: 'CONFIG_HVF', if_false: files('hvf-stub.c'))
+arm_common_system_ss.add(files(
'arch_dump.c',
'arm-powerctl.c',
- 'arm-qmp-cmds.c',
'cortex-regs.c',
+ 'cpregs-pmu.c',
+ 'debug_helper.c',
+ 'helper.c',
'machine.c',
'ptw.c',
+ 'vfp_fpscr.c',
))
-arm_user_ss = ss.source_set()
-
subdir('hvf')
if 'CONFIG_TCG' in config_all_accel
subdir('tcg')
else
- arm_ss.add(files('tcg-stubs.c'))
+ arm_common_system_ss.add(files('tcg-stubs.c'))
endif
target_arch += {'arm': arm_ss}
target_system_arch += {'arm': arm_system_ss}
target_user_arch += {'arm': arm_user_ss}
+target_common_arch += {'arm': arm_common_ss}
+target_common_system_arch += {'arm': arm_common_system_ss}
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 4330900..561bf26 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -10,8 +10,10 @@
#include "qemu/log.h"
#include "qemu/range.h"
#include "qemu/main-loop.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
+#include "exec/tlb-flags.h"
+#include "accel/tcg/probe.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
@@ -120,7 +122,7 @@ unsigned int arm_pamax(ARMCPU *cpu)
{
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
unsigned int parange =
- FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
+ FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE);
/*
* id_aa64mmfr0 is a read-only register so values outside of the
@@ -330,7 +332,7 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
* physical address size is invalid.
*/
pps = FIELD_EX64(gpccr, GPCCR, PPS);
- if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
+ if (pps > FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE)) {
goto fault_walk;
}
pps = pamax_map[pps];
@@ -735,7 +737,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
uint64_t new_val, S1Translate *ptw,
ARMMMUFaultInfo *fi)
{
-#if defined(TARGET_AARCH64) && defined(CONFIG_TCG)
+#if defined(CONFIG_ATOMIC64) && defined(CONFIG_TCG)
uint64_t cur_val;
void *host = ptw->out_host;
@@ -1658,7 +1660,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
uint64_t ttbr;
hwaddr descaddr, indexmask, indexmask_grainsize;
uint32_t tableattrs;
- target_ulong page_size;
+ uint64_t page_size;
uint64_t attrs;
int32_t stride;
int addrsize, inputsize, outputsize;
@@ -1701,7 +1703,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
* ID_AA64MMFR0 is a read-only register so values outside of the
* supported mappings can be considered an implementation error.
*/
- ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
+ ps = FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE);
ps = MIN(ps, param.ps);
assert(ps < ARRAY_SIZE(pamax_map));
outputsize = pamax_map[ps];
@@ -1731,7 +1733,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
* validation to do here.
*/
if (inputsize < addrsize) {
- target_ulong top_bits = sextract64(address, inputsize,
+ uint64_t top_bits = sextract64(address, inputsize,
addrsize - inputsize);
if (-top_bits != param.select) {
/* The gap between the two regions is a Translation fault */
@@ -3549,13 +3551,9 @@ bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
memop, result, fi);
}
-bool get_phys_addr(CPUARMState *env, vaddr address,
- MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+static ARMSecuritySpace
+arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx)
{
- S1Translate ptw = {
- .in_mmu_idx = mmu_idx,
- };
ARMSecuritySpace ss;
switch (mmu_idx) {
@@ -3616,28 +3614,33 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
g_assert_not_reached();
}
- ptw.in_space = ss;
+ return ss;
+}
+
+bool get_phys_addr(CPUARMState *env, vaddr address,
+ MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
+ GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+{
+ S1Translate ptw = {
+ .in_mmu_idx = mmu_idx,
+ .in_space = arm_mmu_idx_to_security_space(env, mmu_idx),
+ };
+
return get_phys_addr_gpc(env, &ptw, address, access_type,
memop, result, fi);
}
-hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
- MemTxAttrs *attrs)
+static hwaddr arm_cpu_get_phys_page(CPUARMState *env, vaddr addr,
+ MemTxAttrs *attrs, ARMMMUIdx mmu_idx)
{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- ARMMMUIdx mmu_idx = arm_mmu_idx(env);
- ARMSecuritySpace ss = arm_security_space(env);
S1Translate ptw = {
.in_mmu_idx = mmu_idx,
- .in_space = ss,
+ .in_space = arm_mmu_idx_to_security_space(env, mmu_idx),
.in_debug = true,
};
GetPhysAddrResult res = {};
ARMMMUFaultInfo fi = {};
- bool ret;
-
- ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
+ bool ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
*attrs = res.f.attrs;
if (ret) {
@@ -3645,3 +3648,33 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
}
return res.f.phys_addr;
}
+
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+ MemTxAttrs *attrs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+
+ hwaddr res = arm_cpu_get_phys_page(env, addr, attrs, mmu_idx);
+
+ if (res != -1) {
+ return res;
+ }
+
+ /*
+ * Memory may be accessible via an "unprivileged load/store" (LDTR/STTR)
+ * variant. In that case, get_a64_user_mem_index() generates an op using
+ * an unprivileged MMU index, so retry the lookup with that regime.
+ */
+ switch (mmu_idx) {
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E10_0);
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E20_0);
+ default:
+ return -1;
+ }
+}
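The new fallback covers debugger reads of memory that the guest only touches through LDTR/STTR: those "unprivileged" accesses translate with the EL0 regime even when executed at EL1 or EL2. Written out as a helper, the mapping is (sketch, equivalent to the switch above):

static ARMMMUIdx mmu_idx_unpriv_variant(ARMMMUIdx idx)
{
    switch (idx) {
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        return ARMMMUIdx_E10_0;     /* EL1&0 regime */
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
        return ARMMMUIdx_E20_0;     /* EL2&0 regime (HCR_EL2.E2H=1) */
    default:
        return idx;                 /* no unprivileged variant */
    }
}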
diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h
index 3244e07..c48d3b8 100644
--- a/target/arm/syndrome.h
+++ b/target/arm/syndrome.h
@@ -80,6 +80,7 @@ typedef enum {
SME_ET_Streaming,
SME_ET_NotStreaming,
SME_ET_InactiveZA,
+ SME_ET_InaccessibleZT0,
} SMEExceptionType;
#define ARM_EL_EC_LENGTH 6
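SMEExceptionType values feed the ETYPE field of the SME syndrome via syn_smetrap(), defined later in this header. A hedged sketch of how a translator would report the new case (the exact raise path in the decode code may differ):

/* Building the syndrome for an instruction that touches ZT0 while it
 * is inaccessible (SME2): */
uint32_t syn = syn_smetrap(SME_ET_InaccessibleZT0, false);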
diff --git a/target/arm/tcg-stubs.c b/target/arm/tcg-stubs.c
index 93a15ca..aac99b2 100644
--- a/target/arm/tcg-stubs.c
+++ b/target/arm/tcg-stubs.c
@@ -21,15 +21,6 @@ void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
{
g_assert_not_reached();
}
-/* Temporarily while cpu_get_tb_cpu_state() is still in common code */
-void assert_hflags_rebuild_correctly(CPUARMState *env)
-{
-}
-
-/* TLBI insns are only used by TCG, so we don't need to do anything for KVM */
-void define_tlb_insn_regs(ARMCPU *cpu)
-{
-}
/* With KVM, we never use float_status, so these can be no-ops */
void arm_set_default_fp_behaviours(float_status *s)
diff --git a/target/arm/tcg/arith_helper.c b/target/arm/tcg/arith_helper.c
index 9a555c7..6701398 100644
--- a/target/arm/tcg/arith_helper.c
+++ b/target/arm/tcg/arith_helper.c
@@ -6,11 +6,12 @@
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
#include "qemu/crc32c.h"
#include <zlib.h> /* for crc32 */
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
/*
* Note that signed overflow is undefined in C. The following routines are
* careful to use unsigned types where modulo arithmetic is required.
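The HELPER_H define redirects QEMU's generated helper machinery at the per-target list in tcg/helper.h instead of the global helper.h. The effect, sketched for one helper this file implements:

/* Declared in tcg/helper.h: */
DEF_HELPER_3(add_setq, i32, env, i32, i32)
/* helper-proto.h.inc then emits a prototype along the lines of: */
uint32_t helper_add_setq(CPUARMState *env, uint32_t a, uint32_t b);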
diff --git a/target/arm/tcg/cpregs-at.c b/target/arm/tcg/cpregs-at.c
new file mode 100644
index 0000000..398a61d
--- /dev/null
+++ b/target/arm/tcg/cpregs-at.c
@@ -0,0 +1,519 @@
+/*
+ * System instructions for address translation
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "cpu-features.h"
+#include "internals.h"
+#include "cpregs.h"
+
+
+static int par_el1_shareability(GetPhysAddrResult *res)
+{
+ /*
+ * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
+ * memory -- see pseudocode PAREncodeShareability().
+ */
+ if (((res->cacheattrs.attrs & 0xf0) == 0) ||
+ res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
+ return 2;
+ }
+ return res->cacheattrs.shareability;
+}
+
+static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ ARMSecuritySpace ss)
+{
+ bool ret;
+ uint64_t par64;
+ bool format64 = false;
+ ARMMMUFaultInfo fi = {};
+ GetPhysAddrResult res = {};
+
+ /*
+ * I_MXTJT: Granule protection checks are not performed on the final
+ * address of a successful translation. This is a translation not a
+ * memory reference, so "memop = none = 0".
+ */
+ ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
+ mmu_idx, ss, &res, &fi);
+
+ /*
+ * ATS operations only do S1 or S1+S2 translations, so we never
+ * have to deal with the ARMCacheAttrs format for S2 only.
+ */
+ assert(!res.cacheattrs.is_s2_format);
+
+ if (ret) {
+ /*
+ * Some kinds of translation fault must cause exceptions rather
+ * than being reported in the PAR.
+ */
+ int current_el = arm_current_el(env);
+ int target_el;
+ uint32_t syn, fsr, fsc;
+ bool take_exc = false;
+
+ if (fi.s1ptw && current_el == 1
+ && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
+ /*
+ * Synchronous stage 2 fault on an access made as part of the
+ * translation table walk for AT S1E0* or AT S1E1* insn
+ * executed from NS EL1. If this is a synchronous external abort
+ * and SCR_EL3.EA == 1, then we take a synchronous external abort
+ * to EL3. Otherwise the fault is taken as an exception to EL2,
+ * and HPFAR_EL2 holds the faulting IPA.
+ */
+ if (fi.type == ARMFault_SyncExternalOnWalk &&
+ (env->cp15.scr_el3 & SCR_EA)) {
+ target_el = 3;
+ } else {
+ env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
+ if (arm_is_secure_below_el3(env) && fi.s1ns) {
+ env->cp15.hpfar_el2 |= HPFAR_NS;
+ }
+ target_el = 2;
+ }
+ take_exc = true;
+ } else if (fi.type == ARMFault_SyncExternalOnWalk) {
+ /*
+ * Synchronous external aborts during a translation table walk
+ * are taken as Data Abort exceptions.
+ */
+ if (fi.stage2) {
+ if (current_el == 3) {
+ target_el = 3;
+ } else {
+ target_el = 2;
+ }
+ } else {
+ target_el = exception_target_el(env);
+ }
+ take_exc = true;
+ }
+
+ if (take_exc) {
+ /* Construct FSR and FSC using same logic as arm_deliver_fault() */
+ if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
+ arm_s1_regime_using_lpae_format(env, mmu_idx)) {
+ fsr = arm_fi_to_lfsc(&fi);
+ fsc = extract32(fsr, 0, 6);
+ } else {
+ fsr = arm_fi_to_sfsc(&fi);
+ fsc = 0x3f;
+ }
+ /*
+ * Report exception with ESR indicating a fault due to a
+ * translation table walk for a cache maintenance instruction.
+ */
+ syn = syn_data_abort_no_iss(current_el == target_el, 0,
+ fi.ea, 1, fi.s1ptw, 1, fsc);
+ env->exception.vaddress = value;
+ env->exception.fsr = fsr;
+ raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
+ }
+ }
+
+ if (is_a64(env)) {
+ format64 = true;
+ } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ /*
+ * ATS1Cxx:
+ * * TTBCR.EAE determines whether the result is returned using the
+ * 32-bit or the 64-bit PAR format
+ * * Instructions executed in Hyp mode always use the 64bit format
+ *
+ * ATS1S2NSOxx uses the 64bit format if any of the following is true:
+ * * The Non-secure TTBCR.EAE bit is set to 1
+ * * The implementation includes EL2, and the value of HCR.VM is 1
+ *
+ * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
+ *
+ * ATS1Hx always uses the 64bit format.
+ */
+ format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
+
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ if (mmu_idx == ARMMMUIdx_E10_0 ||
+ mmu_idx == ARMMMUIdx_E10_1 ||
+ mmu_idx == ARMMMUIdx_E10_1_PAN) {
+ format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
+ } else {
+ format64 |= arm_current_el(env) == 2;
+ }
+ }
+ }
+
+ if (format64) {
+ /* Create a 64-bit PAR */
+ par64 = (1 << 11); /* LPAE bit always set */
+ if (!ret) {
+ par64 |= res.f.phys_addr & ~0xfffULL;
+ if (!res.f.attrs.secure) {
+ par64 |= (1 << 9); /* NS */
+ }
+ par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
+ par64 |= par_el1_shareability(&res) << 7; /* SH */
+ } else {
+ uint32_t fsr = arm_fi_to_lfsc(&fi);
+
+ par64 |= 1; /* F */
+ par64 |= (fsr & 0x3f) << 1; /* FS */
+ if (fi.stage2) {
+ par64 |= (1 << 9); /* S */
+ }
+ if (fi.s1ptw) {
+ par64 |= (1 << 8); /* PTW */
+ }
+ }
+ } else {
+ /*
+ * fsr is a DFSR/IFSR value for the short descriptor
+ * translation table format (with WnR always clear).
+ * Convert it to a 32-bit PAR.
+ */
+ if (!ret) {
+ /* We do not set any attribute bits in the PAR */
+ if (res.f.lg_page_size == 24
+ && arm_feature(env, ARM_FEATURE_V7)) {
+ par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
+ } else {
+ par64 = res.f.phys_addr & 0xfffff000;
+ }
+ if (!res.f.attrs.secure) {
+ par64 |= (1 << 9); /* NS */
+ }
+ } else {
+ uint32_t fsr = arm_fi_to_sfsc(&fi);
+
+ par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
+ ((fsr & 0xf) << 1) | 1;
+ }
+ }
+ return par64;
+}
+
+static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ uint64_t par64;
+ ARMMMUIdx mmu_idx;
+ int el = arm_current_el(env);
+ ARMSecuritySpace ss = arm_security_space(env);
+
+ switch (ri->opc2 & 6) {
+ case 0:
+ /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
+ switch (el) {
+ case 3:
+ if (ri->crm == 9 && arm_pan_enabled(env)) {
+ mmu_idx = ARMMMUIdx_E30_3_PAN;
+ } else {
+ mmu_idx = ARMMMUIdx_E3;
+ }
+ break;
+ case 2:
+ g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
+ /* fall through */
+ case 1:
+ if (ri->crm == 9 && arm_pan_enabled(env)) {
+ mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
+ } else {
+ mmu_idx = ARMMMUIdx_Stage1_E1;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 2:
+ /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
+ switch (el) {
+ case 3:
+ mmu_idx = ARMMMUIdx_E30_0;
+ break;
+ case 2:
+ g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
+ mmu_idx = ARMMMUIdx_Stage1_E0;
+ break;
+ case 1:
+ mmu_idx = ARMMMUIdx_Stage1_E0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 4:
+ /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
+ mmu_idx = ARMMMUIdx_E10_1;
+ ss = ARMSS_NonSecure;
+ break;
+ case 6:
+ /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
+ mmu_idx = ARMMMUIdx_E10_0;
+ ss = ARMSS_NonSecure;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ par64 = do_ats_write(env, value, access_type, mmu_idx, ss);
+
+ A32_BANKED_CURRENT_REG_SET(env, par, par64);
+}
+
+static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ uint64_t par64;
+
+ /* There is no SecureEL2 for AArch32. */
+ par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
+ ARMSS_NonSecure);
+
+ A32_BANKED_CURRENT_REG_SET(env, par, par64);
+}
+
+static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /*
+ * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level
+ * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can
+ * only happen when executing at EL3 because that combination also causes an
+ * illegal exception return. We don't need to check FEAT_RME either, because
+ * scr_write() ensures that the NSE bit is not set otherwise.
+ */
+ if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
+ return CP_ACCESS_UNDEFINED;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 3 &&
+ !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
+ return CP_ACCESS_UNDEFINED;
+ }
+ return at_e012_access(env, ri, isread);
+}
+
+static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return at_e012_access(env, ri, isread);
+}
+
+static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ ARMMMUIdx mmu_idx;
+ uint64_t hcr_el2 = arm_hcr_el2_eff(env);
+ bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
+ bool for_el3 = false;
+ ARMSecuritySpace ss;
+
+ switch (ri->opc2 & 6) {
+ case 0:
+ switch (ri->opc1) {
+ case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
+ if (ri->crm == 9 && arm_pan_enabled(env)) {
+ mmu_idx = regime_e20 ?
+ ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
+ } else {
+ mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
+ }
+ break;
+ case 4: /* AT S1E2R, AT S1E2W */
+ mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
+ break;
+ case 6: /* AT S1E3R, AT S1E3W */
+ mmu_idx = ARMMMUIdx_E3;
+ for_el3 = true;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 2: /* AT S1E0R, AT S1E0W */
+ mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
+ break;
+ case 4: /* AT S12E1R, AT S12E1W */
+ mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
+ break;
+ case 6: /* AT S12E0R, AT S12E0W */
+ mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env);
+ env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss);
+}
+
+static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (ri->opc2 & 4) {
+ /*
+ * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
+ * Secure EL1 (which can only happen if EL3 is AArch64).
+ * They are simply UNDEF if executed from NS EL1.
+ * They function normally from EL2 or EL3.
+ */
+ if (arm_current_el(env) == 1) {
+ if (arm_is_secure_below_el3(env)) {
+ if (env->cp15.scr_el3 & SCR_EEL2) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_UNDEFINED;
+ }
+ }
+ return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo vapa_ats_reginfo[] = {
+ /* This underdecoding is safe because the reginfo is NO_RAW. */
+ { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_W, .accessfn = ats_access,
+ .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
+};
+
+static const ARMCPRegInfo v8_ats_reginfo[] = {
+ /* 64 bit address translation operations */
+ { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .fgt = FGT_ATS1E1R,
+ .accessfn = at_s1e01_access, .writefn = ats_write64 },
+ { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .fgt = FGT_ATS1E1W,
+ .accessfn = at_s1e01_access, .writefn = ats_write64 },
+ { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .fgt = FGT_ATS1E0R,
+ .accessfn = at_s1e01_access, .writefn = ats_write64 },
+ { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .fgt = FGT_ATS1E0W,
+ .accessfn = at_s1e01_access, .writefn = ats_write64 },
+ { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .accessfn = at_e012_access, .writefn = ats_write64 },
+ { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .accessfn = at_e012_access, .writefn = ats_write64 },
+ { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .accessfn = at_e012_access, .writefn = ats_write64 },
+ { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .accessfn = at_e012_access, .writefn = ats_write64 },
+ /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
+ { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+ { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
+};
+
+static const ARMCPRegInfo el2_ats_reginfo[] = {
+ /*
+ * Unlike the other EL2-related AT operations, these must
+ * UNDEF from EL3 if EL2 is not implemented, which is why we
+ * define them here rather than with the rest of the AT ops.
+ */
+ { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL2_W, .accessfn = at_s1e2_access,
+ .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = ats_write64 },
+ { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL2_W, .accessfn = at_s1e2_access,
+ .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
+ .writefn = ats_write64 },
+ /*
+ * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
+ * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
+ * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
+ * to behave as if SCR.NS was 1.
+ */
+ { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL2_W,
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
+ { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL2_W,
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
+};
+
+static const ARMCPRegInfo ats1e1_reginfo[] = {
+ { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .fgt = FGT_ATS1E1RP,
+ .accessfn = at_s1e01_access, .writefn = ats_write64 },
+ { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .fgt = FGT_ATS1E1WP,
+ .accessfn = at_s1e01_access, .writefn = ats_write64 },
+};
+
+static const ARMCPRegInfo ats1cp_reginfo[] = {
+ { .name = "ATS1CPRP",
+ .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write },
+ { .name = "ATS1CPWP",
+ .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write },
+};
+
+void define_at_insn_regs(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+
+ if (arm_feature(env, ARM_FEATURE_VAPA)) {
+ define_arm_cp_regs(cpu, vapa_ats_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ define_arm_cp_regs(cpu, v8_ats_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_EL2)
+ || (arm_feature(env, ARM_FEATURE_EL3)
+ && arm_feature(env, ARM_FEATURE_V8))) {
+ define_arm_cp_regs(cpu, el2_ats_reginfo);
+ }
+ if (cpu_isar_feature(aa64_ats1e1, cpu)) {
+ define_arm_cp_regs(cpu, ats1e1_reginfo);
+ }
+ if (cpu_isar_feature(aa32_ats1e1, cpu)) {
+ define_arm_cp_regs(cpu, ats1cp_reginfo);
+ }
+}
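All of the writefns funnel into do_ats_write(), which leaves either a translated address or a fault code in PAR format for the guest to read back from PAR/PAR_EL1. Decoding a 64-bit result mirrors the construction above (sketch; extract64() is from qemu/bitops.h):

static bool par64_is_fault(uint64_t par)
{
    return par & 1;                       /* F bit */
}

static uint64_t par64_phys_addr(uint64_t par)
{
    return extract64(par, 12, 40) << 12;  /* PA field, bits [51:12] */
}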
diff --git a/target/arm/tcg/cpu-v7m.c b/target/arm/tcg/cpu-v7m.c
index c4dd309..dc249ce 100644
--- a/target/arm/tcg/cpu-v7m.c
+++ b/target/arm/tcg/cpu-v7m.c
@@ -45,6 +45,7 @@ static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
static void cortex_m0_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V6);
set_feature(&cpu->env, ARM_FEATURE_M);
@@ -58,51 +59,53 @@ static void cortex_m0_initfn(Object *obj)
* by looking at ID register fields. We use the same values as
* for the M3.
*/
- cpu->isar.id_pfr0 = 0x00000030;
- cpu->isar.id_pfr1 = 0x00000200;
- cpu->isar.id_dfr0 = 0x00100000;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00000030;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x00000000;
- cpu->isar.id_mmfr3 = 0x00000000;
- cpu->isar.id_isar0 = 0x01141110;
- cpu->isar.id_isar1 = 0x02111000;
- cpu->isar.id_isar2 = 0x21112231;
- cpu->isar.id_isar3 = 0x01111110;
- cpu->isar.id_isar4 = 0x01310102;
- cpu->isar.id_isar5 = 0x00000000;
- cpu->isar.id_isar6 = 0x00000000;
+ SET_IDREG(isar, ID_PFR0, 0x00000030);
+ SET_IDREG(isar, ID_PFR1, 0x00000200);
+ SET_IDREG(isar, ID_DFR0, 0x00100000);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_MMFR0, 0x00000030);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x00000000);
+ SET_IDREG(isar, ID_MMFR3, 0x00000000);
+ SET_IDREG(isar, ID_ISAR0, 0x01141110);
+ SET_IDREG(isar, ID_ISAR1, 0x02111000);
+ SET_IDREG(isar, ID_ISAR2, 0x21112231);
+ SET_IDREG(isar, ID_ISAR3, 0x01111110);
+ SET_IDREG(isar, ID_ISAR4, 0x01310102);
+ SET_IDREG(isar, ID_ISAR5, 0x00000000);
+ SET_IDREG(isar, ID_ISAR6, 0x00000000);
}
static void cortex_m3_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V7);
set_feature(&cpu->env, ARM_FEATURE_M);
set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
cpu->midr = 0x410fc231;
cpu->pmsav7_dregion = 8;
- cpu->isar.id_pfr0 = 0x00000030;
- cpu->isar.id_pfr1 = 0x00000200;
- cpu->isar.id_dfr0 = 0x00100000;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00000030;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x00000000;
- cpu->isar.id_mmfr3 = 0x00000000;
- cpu->isar.id_isar0 = 0x01141110;
- cpu->isar.id_isar1 = 0x02111000;
- cpu->isar.id_isar2 = 0x21112231;
- cpu->isar.id_isar3 = 0x01111110;
- cpu->isar.id_isar4 = 0x01310102;
- cpu->isar.id_isar5 = 0x00000000;
- cpu->isar.id_isar6 = 0x00000000;
+ SET_IDREG(isar, ID_PFR0, 0x00000030);
+ SET_IDREG(isar, ID_PFR1, 0x00000200);
+ SET_IDREG(isar, ID_DFR0, 0x00100000);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_MMFR0, 0x00000030);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x00000000);
+ SET_IDREG(isar, ID_MMFR3, 0x00000000);
+ SET_IDREG(isar, ID_ISAR0, 0x01141110);
+ SET_IDREG(isar, ID_ISAR1, 0x02111000);
+ SET_IDREG(isar, ID_ISAR2, 0x21112231);
+ SET_IDREG(isar, ID_ISAR3, 0x01111110);
+ SET_IDREG(isar, ID_ISAR4, 0x01310102);
+ SET_IDREG(isar, ID_ISAR5, 0x00000000);
+ SET_IDREG(isar, ID_ISAR6, 0x00000000);
}
static void cortex_m4_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V7);
set_feature(&cpu->env, ARM_FEATURE_M);
@@ -113,26 +116,27 @@ static void cortex_m4_initfn(Object *obj)
cpu->isar.mvfr0 = 0x10110021;
cpu->isar.mvfr1 = 0x11000011;
cpu->isar.mvfr2 = 0x00000000;
- cpu->isar.id_pfr0 = 0x00000030;
- cpu->isar.id_pfr1 = 0x00000200;
- cpu->isar.id_dfr0 = 0x00100000;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00000030;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x00000000;
- cpu->isar.id_mmfr3 = 0x00000000;
- cpu->isar.id_isar0 = 0x01141110;
- cpu->isar.id_isar1 = 0x02111000;
- cpu->isar.id_isar2 = 0x21112231;
- cpu->isar.id_isar3 = 0x01111110;
- cpu->isar.id_isar4 = 0x01310102;
- cpu->isar.id_isar5 = 0x00000000;
- cpu->isar.id_isar6 = 0x00000000;
+ SET_IDREG(isar, ID_PFR0, 0x00000030);
+ SET_IDREG(isar, ID_PFR1, 0x00000200);
+ SET_IDREG(isar, ID_DFR0, 0x00100000);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_MMFR0, 0x00000030);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x00000000);
+ SET_IDREG(isar, ID_MMFR3, 0x00000000);
+ SET_IDREG(isar, ID_ISAR0, 0x01141110);
+ SET_IDREG(isar, ID_ISAR1, 0x02111000);
+ SET_IDREG(isar, ID_ISAR2, 0x21112231);
+ SET_IDREG(isar, ID_ISAR3, 0x01111110);
+ SET_IDREG(isar, ID_ISAR4, 0x01310102);
+ SET_IDREG(isar, ID_ISAR5, 0x00000000);
+ SET_IDREG(isar, ID_ISAR6, 0x00000000);
}
static void cortex_m7_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V7);
set_feature(&cpu->env, ARM_FEATURE_M);
@@ -143,26 +147,27 @@ static void cortex_m7_initfn(Object *obj)
cpu->isar.mvfr0 = 0x10110221;
cpu->isar.mvfr1 = 0x12000011;
cpu->isar.mvfr2 = 0x00000040;
- cpu->isar.id_pfr0 = 0x00000030;
- cpu->isar.id_pfr1 = 0x00000200;
- cpu->isar.id_dfr0 = 0x00100000;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00100030;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x01000000;
- cpu->isar.id_mmfr3 = 0x00000000;
- cpu->isar.id_isar0 = 0x01101110;
- cpu->isar.id_isar1 = 0x02112000;
- cpu->isar.id_isar2 = 0x20232231;
- cpu->isar.id_isar3 = 0x01111131;
- cpu->isar.id_isar4 = 0x01310132;
- cpu->isar.id_isar5 = 0x00000000;
- cpu->isar.id_isar6 = 0x00000000;
+ SET_IDREG(isar, ID_PFR0, 0x00000030);
+ SET_IDREG(isar, ID_PFR1, 0x00000200);
+ SET_IDREG(isar, ID_DFR0, 0x00100000);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_MMFR0, 0x00100030);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01000000);
+ SET_IDREG(isar, ID_MMFR3, 0x00000000);
+ SET_IDREG(isar, ID_ISAR0, 0x01101110);
+ SET_IDREG(isar, ID_ISAR1, 0x02112000);
+ SET_IDREG(isar, ID_ISAR2, 0x20232231);
+ SET_IDREG(isar, ID_ISAR3, 0x01111131);
+ SET_IDREG(isar, ID_ISAR4, 0x01310132);
+ SET_IDREG(isar, ID_ISAR5, 0x00000000);
+ SET_IDREG(isar, ID_ISAR6, 0x00000000);
}
static void cortex_m33_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V8);
set_feature(&cpu->env, ARM_FEATURE_M);
@@ -175,28 +180,29 @@ static void cortex_m33_initfn(Object *obj)
cpu->isar.mvfr0 = 0x10110021;
cpu->isar.mvfr1 = 0x11000011;
cpu->isar.mvfr2 = 0x00000040;
- cpu->isar.id_pfr0 = 0x00000030;
- cpu->isar.id_pfr1 = 0x00000210;
- cpu->isar.id_dfr0 = 0x00200000;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00101F40;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x01000000;
- cpu->isar.id_mmfr3 = 0x00000000;
- cpu->isar.id_isar0 = 0x01101110;
- cpu->isar.id_isar1 = 0x02212000;
- cpu->isar.id_isar2 = 0x20232232;
- cpu->isar.id_isar3 = 0x01111131;
- cpu->isar.id_isar4 = 0x01310132;
- cpu->isar.id_isar5 = 0x00000000;
- cpu->isar.id_isar6 = 0x00000000;
- cpu->clidr = 0x00000000;
+ SET_IDREG(isar, ID_PFR0, 0x00000030);
+ SET_IDREG(isar, ID_PFR1, 0x00000210);
+ SET_IDREG(isar, ID_DFR0, 0x00200000);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_MMFR0, 0x00101F40);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01000000);
+ SET_IDREG(isar, ID_MMFR3, 0x00000000);
+ SET_IDREG(isar, ID_ISAR0, 0x01101110);
+ SET_IDREG(isar, ID_ISAR1, 0x02212000);
+ SET_IDREG(isar, ID_ISAR2, 0x20232232);
+ SET_IDREG(isar, ID_ISAR3, 0x01111131);
+ SET_IDREG(isar, ID_ISAR4, 0x01310132);
+ SET_IDREG(isar, ID_ISAR5, 0x00000000);
+ SET_IDREG(isar, ID_ISAR6, 0x00000000);
+ SET_IDREG(isar, CLIDR, 0x00000000);
cpu->ctr = 0x8000c000;
}
static void cortex_m55_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V8);
set_feature(&cpu->env, ARM_FEATURE_V8_1M);
@@ -212,39 +218,47 @@ static void cortex_m55_initfn(Object *obj)
cpu->isar.mvfr0 = 0x10110221;
cpu->isar.mvfr1 = 0x12100211;
cpu->isar.mvfr2 = 0x00000040;
- cpu->isar.id_pfr0 = 0x20000030;
- cpu->isar.id_pfr1 = 0x00000230;
- cpu->isar.id_dfr0 = 0x10200000;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00111040;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x01000000;
- cpu->isar.id_mmfr3 = 0x00000011;
- cpu->isar.id_isar0 = 0x01103110;
- cpu->isar.id_isar1 = 0x02212000;
- cpu->isar.id_isar2 = 0x20232232;
- cpu->isar.id_isar3 = 0x01111131;
- cpu->isar.id_isar4 = 0x01310132;
- cpu->isar.id_isar5 = 0x00000000;
- cpu->isar.id_isar6 = 0x00000000;
- cpu->clidr = 0x00000000; /* caches not implemented */
+ SET_IDREG(isar, ID_PFR0, 0x20000030);
+ SET_IDREG(isar, ID_PFR1, 0x00000230);
+ SET_IDREG(isar, ID_DFR0, 0x10200000);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_MMFR0, 0x00111040);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01000000);
+ SET_IDREG(isar, ID_MMFR3, 0x00000011);
+ SET_IDREG(isar, ID_ISAR0, 0x01103110);
+ SET_IDREG(isar, ID_ISAR1, 0x02212000);
+ SET_IDREG(isar, ID_ISAR2, 0x20232232);
+ SET_IDREG(isar, ID_ISAR3, 0x01111131);
+ SET_IDREG(isar, ID_ISAR4, 0x01310132);
+ SET_IDREG(isar, ID_ISAR5, 0x00000000);
+ SET_IDREG(isar, ID_ISAR6, 0x00000000);
+ SET_IDREG(isar, CLIDR, 0x00000000); /* caches not implemented */
cpu->ctr = 0x8303c003;
}
static const TCGCPUOps arm_v7m_tcg_ops = {
+ /* ARM processors have a weak memory model */
+ .guest_default_memory_order = 0,
+ .mttcg_supported = true,
+
.initialize = arm_translate_init,
.translate_code = arm_translate_code,
+ .get_tb_cpu_state = arm_get_tb_cpu_state,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,
+ .mmu_index = arm_cpu_mmu_index,
#ifdef CONFIG_USER_ONLY
.record_sigsegv = arm_cpu_record_sigsegv,
.record_sigbus = arm_cpu_record_sigbus,
#else
.tlb_fill_align = arm_cpu_tlb_fill_align,
+ .pointer_wrap = cpu_pointer_wrap_uint32,
.cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = arm_v7m_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
@@ -254,14 +268,13 @@ static const TCGCPUOps arm_v7m_tcg_ops = {
#endif /* !CONFIG_USER_ONLY */
};
-static void arm_v7m_class_init(ObjectClass *oc, void *data)
+static void arm_v7m_class_init(ObjectClass *oc, const void *data)
{
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(oc);
acc->info = data;
cc->tcg_ops = &arm_v7m_tcg_ops;
- cc->gdb_core_xml_file = "arm-m-profile.xml";
}
static const ARMCPUInfo arm_v7m_cpus[] = {
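The TCGCPUOps changes above fold what used to be build- or class-level knowledge into the hook table itself: Arm's weak memory model is declared with guest_default_memory_order = 0 (no TCG_MO_* barriers implied on ordinary guest accesses) together with mttcg_supported = true. For contrast, a sketch of what a strongly ordered guest would declare instead (hypothetical, not from this patch):

/* Hypothetical hook table for a strongly ordered (x86-like) guest. */
static const TCGCPUOps strongly_ordered_ops_sketch = {
    /* Loads and stores must appear to execute in program order. */
    .guest_default_memory_order = TCG_MO_ALL,
    .mttcg_supported = true,
    /* ... remaining hooks elided ... */
};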
diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c
index 2c45b7e..a2a23ea 100644
--- a/target/arm/tcg/cpu32.c
+++ b/target/arm/tcg/cpu32.c
@@ -23,18 +23,19 @@
void aa32_max_features(ARMCPU *cpu)
{
uint32_t t;
+ ARMISARegisters *isar = &cpu->isar;
/* Add additional features supported by QEMU */
- t = cpu->isar.id_isar5;
+ t = GET_IDREG(isar, ID_ISAR5);
t = FIELD_DP32(t, ID_ISAR5, AES, 2); /* FEAT_PMULL */
t = FIELD_DP32(t, ID_ISAR5, SHA1, 1); /* FEAT_SHA1 */
t = FIELD_DP32(t, ID_ISAR5, SHA2, 1); /* FEAT_SHA256 */
t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
t = FIELD_DP32(t, ID_ISAR5, RDM, 1); /* FEAT_RDM */
t = FIELD_DP32(t, ID_ISAR5, VCMA, 1); /* FEAT_FCMA */
- cpu->isar.id_isar5 = t;
+ SET_IDREG(isar, ID_ISAR5, t);
- t = cpu->isar.id_isar6;
+ t = GET_IDREG(isar, ID_ISAR6);
t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1); /* FEAT_JSCVT */
t = FIELD_DP32(t, ID_ISAR6, DP, 1); /* Feat_DotProd */
t = FIELD_DP32(t, ID_ISAR6, FHM, 1); /* FEAT_FHM */
@@ -42,7 +43,7 @@ void aa32_max_features(ARMCPU *cpu)
t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1); /* FEAT_SPECRES */
t = FIELD_DP32(t, ID_ISAR6, BF16, 1); /* FEAT_AA32BF16 */
t = FIELD_DP32(t, ID_ISAR6, I8MM, 1); /* FEAT_AA32I8MM */
- cpu->isar.id_isar6 = t;
+ SET_IDREG(isar, ID_ISAR6, t);
t = cpu->isar.mvfr1;
t = FIELD_DP32(t, MVFR1, FPHP, 3); /* FEAT_FP16 */
@@ -54,38 +55,34 @@ void aa32_max_features(ARMCPU *cpu)
t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */
cpu->isar.mvfr2 = t;
- t = cpu->isar.id_mmfr3;
- t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* FEAT_PAN2 */
- cpu->isar.id_mmfr3 = t;
+ FIELD_DP32_IDREG(isar, ID_MMFR3, PAN, 2); /* FEAT_PAN2 */
- t = cpu->isar.id_mmfr4;
+ t = GET_IDREG(isar, ID_MMFR4);
t = FIELD_DP32(t, ID_MMFR4, HPDS, 2); /* FEAT_HPDS2 */
t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* FEAT_TTCNP */
t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX */
t = FIELD_DP32(t, ID_MMFR4, EVT, 2); /* FEAT_EVT */
- cpu->isar.id_mmfr4 = t;
+ SET_IDREG(isar, ID_MMFR4, t);
- t = cpu->isar.id_mmfr5;
- t = FIELD_DP32(t, ID_MMFR5, ETS, 2); /* FEAT_ETS2 */
- cpu->isar.id_mmfr5 = t;
+ FIELD_DP32_IDREG(isar, ID_MMFR5, ETS, 2); /* FEAT_ETS2 */
- t = cpu->isar.id_pfr0;
+ t = GET_IDREG(isar, ID_PFR0);
t = FIELD_DP32(t, ID_PFR0, CSV2, 2); /* FEAT_CSV2 */
t = FIELD_DP32(t, ID_PFR0, DIT, 1); /* FEAT_DIT */
t = FIELD_DP32(t, ID_PFR0, RAS, 1); /* FEAT_RAS */
- cpu->isar.id_pfr0 = t;
+ SET_IDREG(isar, ID_PFR0, t);
- t = cpu->isar.id_pfr2;
+ t = GET_IDREG(isar, ID_PFR2);
t = FIELD_DP32(t, ID_PFR2, CSV3, 1); /* FEAT_CSV3 */
t = FIELD_DP32(t, ID_PFR2, SSBS, 1); /* FEAT_SSBS */
- cpu->isar.id_pfr2 = t;
+ SET_IDREG(isar, ID_PFR2, t);
- t = cpu->isar.id_dfr0;
+ t = GET_IDREG(isar, ID_DFR0);
t = FIELD_DP32(t, ID_DFR0, COPDBG, 10); /* FEAT_Debugv8p8 */
t = FIELD_DP32(t, ID_DFR0, COPSDBG, 10); /* FEAT_Debugv8p8 */
t = FIELD_DP32(t, ID_DFR0, PERFMON, 6); /* FEAT_PMUv3p5 */
- cpu->isar.id_dfr0 = t;
+ SET_IDREG(isar, ID_DFR0, t);
/* Debug ID registers. */
@@ -115,9 +112,7 @@ void aa32_max_features(ARMCPU *cpu)
t = FIELD_DP32(t, DBGDEVID1, PCSROFFSET, 2);
cpu->isar.dbgdevid1 = t;
- t = cpu->isar.id_dfr1;
- t = FIELD_DP32(t, ID_DFR1, HPMN0, 1); /* FEAT_HPMN0 */
- cpu->isar.id_dfr1 = t;
+ FIELD_DP32_IDREG(isar, ID_DFR1, HPMN0, 1); /* FEAT_HPMN0 */
}
/* CPU models. These are not needed for the AArch64 linux-user build. */
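The conversion running through all of these init functions is mechanical: each scalar cpu->isar.id_* field (plus cpu->clidr and cpu->id_afr0) moves into one indexed idregs[] array, so ID registers can be iterated uniformly rather than named one by one. The accessor macros are defined elsewhere in the series; a plausible shape, to make the hunks readable (a sketch assuming a NAME ## _EL1_IDX index enum, not the exact upstream definition):

#define GET_IDREG(isar, name)       ((isar)->idregs[name ## _EL1_IDX])
#define SET_IDREG(isar, name, val)  ((isar)->idregs[name ## _EL1_IDX] = (val))

/* Read-modify-write convenience: deposit one field of one ID register. */
#define FIELD_DP32_IDREG(isar, reg, fld, val) \
    SET_IDREG(isar, reg, FIELD_DP32(GET_IDREG(isar, reg), reg, fld, val))

Under that reading, the old three-line read/deposit/write sequences and the new one-line FIELD_DP32_IDREG() calls are exactly equivalent, which is why every replaced constant must survive the conversion bit-for-bit.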
@@ -140,7 +135,7 @@ static void arm926_initfn(Object *obj)
* ARMv5 does not have the ID_ISAR registers, but we can still
* set the field to indicate Jazelle support within QEMU.
*/
- cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
+ FIELD_DP32_IDREG(&cpu->isar, ID_ISAR1, JAZELLE, 1);
/*
* Similarly, we need to set MVFR0 fields to enable vfp and short vector
* support even though ARMv5 doesn't have this register.
@@ -182,7 +177,7 @@ static void arm1026_initfn(Object *obj)
* ARMv5 does not have the ID_ISAR registers, but we can still
* set the field to indicate Jazelle support within QEMU.
*/
- cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
+ FIELD_DP32_IDREG(&cpu->isar, ID_ISAR1, JAZELLE, 1);
/*
* Similarly, we need to set MVFR0 fields to enable vfp and short vector
* support even though ARMv5 doesn't have this register.
@@ -206,6 +201,7 @@ static void arm1026_initfn(Object *obj)
static void arm1136_r2_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
/*
* What qemu calls "arm1136_r2" is actually the 1136 r0p2, ie an
* older core than plain "arm1136". In particular this does not
@@ -226,24 +222,25 @@ static void arm1136_r2_initfn(Object *obj)
cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00050078;
- cpu->isar.id_pfr0 = 0x111;
- cpu->isar.id_pfr1 = 0x1;
- cpu->isar.id_dfr0 = 0x2;
- cpu->id_afr0 = 0x3;
- cpu->isar.id_mmfr0 = 0x01130003;
- cpu->isar.id_mmfr1 = 0x10030302;
- cpu->isar.id_mmfr2 = 0x01222110;
- cpu->isar.id_isar0 = 0x00140011;
- cpu->isar.id_isar1 = 0x12002111;
- cpu->isar.id_isar2 = 0x11231111;
- cpu->isar.id_isar3 = 0x01102131;
- cpu->isar.id_isar4 = 0x141;
+ SET_IDREG(isar, ID_PFR0, 0x111);
+ SET_IDREG(isar, ID_PFR1, 0x1);
+ SET_IDREG(isar, ID_DFR0, 0x2);
+ SET_IDREG(isar, ID_AFR0, 0x3);
+ SET_IDREG(isar, ID_MMFR0, 0x01130003);
+ SET_IDREG(isar, ID_MMFR1, 0x10030302);
+ SET_IDREG(isar, ID_MMFR2, 0x01222110);
+ SET_IDREG(isar, ID_ISAR0, 0x00140011);
+ SET_IDREG(isar, ID_ISAR1, 0x12002111);
+ SET_IDREG(isar, ID_ISAR2, 0x11231111);
+ SET_IDREG(isar, ID_ISAR3, 0x01102131);
+ SET_IDREG(isar, ID_ISAR4, 0x141);
cpu->reset_auxcr = 7;
}
static void arm1136_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,arm1136";
set_feature(&cpu->env, ARM_FEATURE_V6K);
@@ -257,24 +254,25 @@ static void arm1136_initfn(Object *obj)
cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00050078;
- cpu->isar.id_pfr0 = 0x111;
- cpu->isar.id_pfr1 = 0x1;
- cpu->isar.id_dfr0 = 0x2;
- cpu->id_afr0 = 0x3;
- cpu->isar.id_mmfr0 = 0x01130003;
- cpu->isar.id_mmfr1 = 0x10030302;
- cpu->isar.id_mmfr2 = 0x01222110;
- cpu->isar.id_isar0 = 0x00140011;
- cpu->isar.id_isar1 = 0x12002111;
- cpu->isar.id_isar2 = 0x11231111;
- cpu->isar.id_isar3 = 0x01102131;
- cpu->isar.id_isar4 = 0x141;
+ SET_IDREG(isar, ID_PFR0, 0x111);
+ SET_IDREG(isar, ID_PFR1, 0x1);
+ SET_IDREG(isar, ID_DFR0, 0x2);
+ SET_IDREG(isar, ID_AFR0, 0x3);
+ SET_IDREG(isar, ID_MMFR0, 0x01130003);
+ SET_IDREG(isar, ID_MMFR1, 0x10030302);
+ SET_IDREG(isar, ID_MMFR2, 0x01222110);
+ SET_IDREG(isar, ID_ISAR0, 0x00140011);
+ SET_IDREG(isar, ID_ISAR1, 0x12002111);
+ SET_IDREG(isar, ID_ISAR2, 0x11231111);
+ SET_IDREG(isar, ID_ISAR3, 0x01102131);
+ SET_IDREG(isar, ID_ISAR4, 0x141);
cpu->reset_auxcr = 7;
}
static void arm1176_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,arm1176";
set_feature(&cpu->env, ARM_FEATURE_V6K);
@@ -289,24 +287,25 @@ static void arm1176_initfn(Object *obj)
cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1dd20d2;
cpu->reset_sctlr = 0x00050078;
- cpu->isar.id_pfr0 = 0x111;
- cpu->isar.id_pfr1 = 0x11;
- cpu->isar.id_dfr0 = 0x33;
- cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x01130003;
- cpu->isar.id_mmfr1 = 0x10030302;
- cpu->isar.id_mmfr2 = 0x01222100;
- cpu->isar.id_isar0 = 0x0140011;
- cpu->isar.id_isar1 = 0x12002111;
- cpu->isar.id_isar2 = 0x11231121;
- cpu->isar.id_isar3 = 0x01102131;
- cpu->isar.id_isar4 = 0x01141;
+ SET_IDREG(isar, ID_PFR0, 0x111);
+ SET_IDREG(isar, ID_PFR1, 0x11);
+ SET_IDREG(isar, ID_DFR0, 0x33);
+ SET_IDREG(isar, ID_AFR0, 0);
+ SET_IDREG(isar, ID_MMFR0, 0x01130003);
+ SET_IDREG(isar, ID_MMFR1, 0x10030302);
+ SET_IDREG(isar, ID_MMFR2, 0x01222100);
+ SET_IDREG(isar, ID_ISAR0, 0x0140011);
+ SET_IDREG(isar, ID_ISAR1, 0x12002111);
+ SET_IDREG(isar, ID_ISAR2, 0x11231121);
+ SET_IDREG(isar, ID_ISAR3, 0x01102131);
+ SET_IDREG(isar, ID_ISAR4, 0x01141);
cpu->reset_auxcr = 7;
}
static void arm11mpcore_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,arm11mpcore";
set_feature(&cpu->env, ARM_FEATURE_V6K);
@@ -318,18 +317,18 @@ static void arm11mpcore_initfn(Object *obj)
cpu->isar.mvfr0 = 0x11111111;
cpu->isar.mvfr1 = 0x00000000;
cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
- cpu->isar.id_pfr0 = 0x111;
- cpu->isar.id_pfr1 = 0x1;
- cpu->isar.id_dfr0 = 0;
- cpu->id_afr0 = 0x2;
- cpu->isar.id_mmfr0 = 0x01100103;
- cpu->isar.id_mmfr1 = 0x10020302;
- cpu->isar.id_mmfr2 = 0x01222000;
- cpu->isar.id_isar0 = 0x00100011;
- cpu->isar.id_isar1 = 0x12002111;
- cpu->isar.id_isar2 = 0x11221011;
- cpu->isar.id_isar3 = 0x01102131;
- cpu->isar.id_isar4 = 0x141;
+ SET_IDREG(isar, ID_PFR0, 0x111);
+ SET_IDREG(isar, ID_PFR1, 0x1);
+ SET_IDREG(isar, ID_DFR0, 0);
+ SET_IDREG(isar, ID_AFR0, 0x2);
+ SET_IDREG(isar, ID_MMFR0, 0x01100103);
+ SET_IDREG(isar, ID_MMFR1, 0x10020302);
+ SET_IDREG(isar, ID_MMFR2, 0x01222000);
+ SET_IDREG(isar, ID_ISAR0, 0x00100011);
+ SET_IDREG(isar, ID_ISAR1, 0x12002111);
+ SET_IDREG(isar, ID_ISAR2, 0x11221011);
+ SET_IDREG(isar, ID_ISAR3, 0x01102131);
+ SET_IDREG(isar, ID_ISAR4, 0x141);
cpu->reset_auxcr = 1;
}
@@ -343,6 +342,7 @@ static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
static void cortex_a8_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a8";
set_feature(&cpu->env, ARM_FEATURE_V7);
@@ -357,21 +357,21 @@ static void cortex_a8_initfn(Object *obj)
cpu->isar.mvfr1 = 0x00011111;
cpu->ctr = 0x82048004;
cpu->reset_sctlr = 0x00c50078;
- cpu->isar.id_pfr0 = 0x1031;
- cpu->isar.id_pfr1 = 0x11;
- cpu->isar.id_dfr0 = 0x400;
- cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x31100003;
- cpu->isar.id_mmfr1 = 0x20000000;
- cpu->isar.id_mmfr2 = 0x01202000;
- cpu->isar.id_mmfr3 = 0x11;
- cpu->isar.id_isar0 = 0x00101111;
- cpu->isar.id_isar1 = 0x12112111;
- cpu->isar.id_isar2 = 0x21232031;
- cpu->isar.id_isar3 = 0x11112131;
- cpu->isar.id_isar4 = 0x00111142;
+ SET_IDREG(isar, ID_PFR0, 0x1031);
+ SET_IDREG(isar, ID_PFR1, 0x11);
+ SET_IDREG(isar, ID_DFR0, 0x400);
+ SET_IDREG(isar, ID_AFR0, 0);
+ SET_IDREG(isar, ID_MMFR0, 0x31100003);
+ SET_IDREG(isar, ID_MMFR1, 0x20000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01202000);
+ SET_IDREG(isar, ID_MMFR3, 0x11);
+ SET_IDREG(isar, ID_ISAR0, 0x00101111);
+ SET_IDREG(isar, ID_ISAR1, 0x12112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232031);
+ SET_IDREG(isar, ID_ISAR3, 0x11112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00111142);
cpu->isar.dbgdidr = 0x15141000;
- cpu->clidr = (1 << 27) | (2 << 24) | 3;
+ SET_IDREG(isar, CLIDR, (1 << 27) | (2 << 24) | 3);
cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
@@ -412,6 +412,7 @@ static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
static void cortex_a9_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a9";
set_feature(&cpu->env, ARM_FEATURE_V7);
@@ -432,21 +433,21 @@ static void cortex_a9_initfn(Object *obj)
cpu->isar.mvfr1 = 0x01111111;
cpu->ctr = 0x80038003;
cpu->reset_sctlr = 0x00c50078;
- cpu->isar.id_pfr0 = 0x1031;
- cpu->isar.id_pfr1 = 0x11;
- cpu->isar.id_dfr0 = 0x000;
- cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x00100103;
- cpu->isar.id_mmfr1 = 0x20000000;
- cpu->isar.id_mmfr2 = 0x01230000;
- cpu->isar.id_mmfr3 = 0x00002111;
- cpu->isar.id_isar0 = 0x00101111;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232041;
- cpu->isar.id_isar3 = 0x11112131;
- cpu->isar.id_isar4 = 0x00111142;
+ SET_IDREG(isar, ID_PFR0, 0x1031);
+ SET_IDREG(isar, ID_PFR1, 0x11);
+ SET_IDREG(isar, ID_DFR0, 0x000);
+ SET_IDREG(isar, ID_AFR0, 0);
+ SET_IDREG(isar, ID_MMFR0, 0x00100103);
+ SET_IDREG(isar, ID_MMFR1, 0x20000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01230000);
+ SET_IDREG(isar, ID_MMFR3, 0x00002111);
+ SET_IDREG(isar, ID_ISAR0, 0x00101111);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232041);
+ SET_IDREG(isar, ID_ISAR3, 0x11112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00111142);
cpu->isar.dbgdidr = 0x35141000;
- cpu->clidr = (1 << 27) | (1 << 24) | 3;
+ SET_IDREG(isar, CLIDR, (1 << 27) | (1 << 24) | 3);
cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
cpu->isar.reset_pmcr_el0 = 0x41093000;
@@ -479,6 +480,7 @@ static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
static void cortex_a7_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a7";
set_feature(&cpu->env, ARM_FEATURE_V7VE);
@@ -497,27 +499,27 @@ static void cortex_a7_initfn(Object *obj)
cpu->isar.mvfr1 = 0x11111111;
cpu->ctr = 0x84448003;
cpu->reset_sctlr = 0x00c50078;
- cpu->isar.id_pfr0 = 0x00001131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x02010555;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10101105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01240000;
- cpu->isar.id_mmfr3 = 0x02102211;
+ SET_IDREG(isar, ID_PFR0, 0x00001131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x02010555);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_MMFR0, 0x10101105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01240000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
/*
* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
* table 4-41 gives 0x02101110, which includes the arm div insns.
*/
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232041;
- cpu->isar.id_isar3 = 0x11112131;
- cpu->isar.id_isar4 = 0x10011142;
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232041);
+ SET_IDREG(isar, ID_ISAR3, 0x11112131);
+ SET_IDREG(isar, ID_ISAR4, 0x10011142);
cpu->isar.dbgdidr = 0x3515f005;
cpu->isar.dbgdevid = 0x01110f13;
cpu->isar.dbgdevid1 = 0x1;
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, CLIDR, 0x0a200023);
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
@@ -528,6 +530,7 @@ static void cortex_a7_initfn(Object *obj)
static void cortex_a15_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a15";
set_feature(&cpu->env, ARM_FEATURE_V7VE);
@@ -548,23 +551,23 @@ static void cortex_a15_initfn(Object *obj)
cpu->isar.mvfr1 = 0x11111111;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50078;
- cpu->isar.id_pfr0 = 0x00001131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x02010555;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x20000000;
- cpu->isar.id_mmfr2 = 0x01240000;
- cpu->isar.id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232041;
- cpu->isar.id_isar3 = 0x11112131;
- cpu->isar.id_isar4 = 0x10011142;
+ SET_IDREG(isar, ID_PFR0, 0x00001131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x02010555);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x20000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01240000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232041);
+ SET_IDREG(isar, ID_ISAR3, 0x11112131);
+ SET_IDREG(isar, ID_ISAR4, 0x10011142);
cpu->isar.dbgdidr = 0x3515f021;
cpu->isar.dbgdevid = 0x01110f13;
cpu->isar.dbgdevid1 = 0x0;
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, CLIDR, 0x0a200023);
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
@@ -585,27 +588,28 @@ static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
static void cortex_r5_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V7);
set_feature(&cpu->env, ARM_FEATURE_V7MP);
set_feature(&cpu->env, ARM_FEATURE_PMSA);
set_feature(&cpu->env, ARM_FEATURE_PMU);
cpu->midr = 0x411fc153; /* r1p3 */
- cpu->isar.id_pfr0 = 0x0131;
- cpu->isar.id_pfr1 = 0x001;
- cpu->isar.id_dfr0 = 0x010400;
- cpu->id_afr0 = 0x0;
- cpu->isar.id_mmfr0 = 0x0210030;
- cpu->isar.id_mmfr1 = 0x00000000;
- cpu->isar.id_mmfr2 = 0x01200000;
- cpu->isar.id_mmfr3 = 0x0211;
- cpu->isar.id_isar0 = 0x02101111;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232141;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x0010142;
- cpu->isar.id_isar5 = 0x0;
- cpu->isar.id_isar6 = 0x0;
+ SET_IDREG(isar, ID_PFR0, 0x0131);
+ SET_IDREG(isar, ID_PFR1, 0x001);
+ SET_IDREG(isar, ID_DFR0, 0x010400);
+ SET_IDREG(isar, ID_AFR0, 0x0);
+ SET_IDREG(isar, ID_MMFR0, 0x0210030);
+ SET_IDREG(isar, ID_MMFR1, 0x00000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01200000);
+ SET_IDREG(isar, ID_MMFR3, 0x0211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101111);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232141);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x0010142);
+ SET_IDREG(isar, ID_ISAR5, 0x0);
+ SET_IDREG(isar, ID_ISAR6, 0x0);
cpu->mp_is_up = true;
cpu->pmsav7_dregion = 16;
cpu->isar.reset_pmcr_el0 = 0x41151800;
@@ -720,6 +724,7 @@ static const ARMCPRegInfo cortex_r52_cp_reginfo[] = {
static void cortex_r52_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
set_feature(&cpu->env, ARM_FEATURE_V8);
set_feature(&cpu->env, ARM_FEATURE_EL2);
@@ -737,23 +742,23 @@ static void cortex_r52_initfn(Object *obj)
cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x8144c004;
cpu->reset_sctlr = 0x30c50838;
- cpu->isar.id_pfr0 = 0x00000131;
- cpu->isar.id_pfr1 = 0x10111001;
- cpu->isar.id_dfr0 = 0x03010006;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x00211040;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01200000;
- cpu->isar.id_mmfr3 = 0xf0102211;
- cpu->isar.id_mmfr4 = 0x00000010;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232142;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00010142;
- cpu->isar.id_isar5 = 0x00010001;
+ SET_IDREG(isar, ID_PFR0, 0x00000131);
+ SET_IDREG(isar, ID_PFR1, 0x10111001);
+ SET_IDREG(isar, ID_DFR0, 0x03010006);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_MMFR0, 0x00211040);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01200000);
+ SET_IDREG(isar, ID_MMFR3, 0xf0102211);
+ SET_IDREG(isar, ID_MMFR4, 0x00000010);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232142);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00010142);
+ SET_IDREG(isar, ID_ISAR5, 0x00010001);
cpu->isar.dbgdidr = 0x77168000;
- cpu->clidr = (1 << 27) | (1 << 24) | 0x3;
+ SET_IDREG(isar, CLIDR, (1 << 27) | (1 << 24) | 0x3);
cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
@@ -949,6 +954,7 @@ static void pxa270c5_initfn(Object *obj)
static void arm_max_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
/* aarch64_a57_initfn, advertising none of the aarch64 features */
cpu->dtb_compatible = "arm,cortex-a57";
@@ -968,23 +974,23 @@ static void arm_max_initfn(Object *obj)
cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50838;
- cpu->isar.id_pfr0 = 0x00000131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x03010066;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10101105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00011142;
- cpu->isar.id_isar5 = 0x00011121;
- cpu->isar.id_isar6 = 0;
+ SET_IDREG(isar, ID_PFR0, 0x00000131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x03010066);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_MMFR0, 0x10101105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00011142);
+ SET_IDREG(isar, ID_ISAR5, 0x00011121);
+ SET_IDREG(isar, ID_ISAR6, 0);
cpu->isar.reset_pmcr_el0 = 0x41013000;
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, CLIDR, 0x0a200023);
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
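All of these init functions lean on the FIELD_DP32/FIELD_DP64 deposit macros from hw/registerfields.h, which overwrite a single named bitfield and leave the rest of the word untouched. A small worked example, assuming ID_ISAR5.AES at bits [7:4] and ID_ISAR5.SHA1 at bits [11:8] (their architectural positions, declared via FIELD() in the target/arm headers):

#include "hw/registerfields.h"   /* FIELD_DP32() */

static uint32_t deposit_example(void)
{
    uint32_t t = 0x00010001;              /* illustrative starting value    */
    t = FIELD_DP32(t, ID_ISAR5, AES, 2);  /* bits [7:4]  := 2 -> 0x00010021 */
    t = FIELD_DP32(t, ID_ISAR5, SHA1, 1); /* bits [11:8] := 1 -> 0x00010121 */
    return t;
}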
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index 29ab0ac..35cddba 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -32,6 +32,7 @@
static void aarch64_a35_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a35";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -48,29 +49,29 @@ static void aarch64_a35_initfn(Object *obj)
cpu->midr = 0x411fd040;
cpu->revidr = 0;
cpu->ctr = 0x84448004;
- cpu->isar.id_pfr0 = 0x00000131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x03010066;
- cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00011142;
- cpu->isar.id_isar5 = 0x00011121;
- cpu->isar.id_aa64pfr0 = 0x00002222;
- cpu->isar.id_aa64pfr1 = 0;
- cpu->isar.id_aa64dfr0 = 0x10305106;
- cpu->isar.id_aa64dfr1 = 0;
- cpu->isar.id_aa64isar0 = 0x00011120;
- cpu->isar.id_aa64isar1 = 0;
- cpu->isar.id_aa64mmfr0 = 0x00101122;
- cpu->isar.id_aa64mmfr1 = 0;
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, ID_PFR0, 0x00000131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x03010066);
+ SET_IDREG(isar, ID_AFR0, 0);
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00011142);
+ SET_IDREG(isar, ID_ISAR5, 0x00011121);
+ SET_IDREG(isar, ID_AA64PFR0, 0x00002222);
+ SET_IDREG(isar, ID_AA64PFR1, 0);
+ SET_IDREG(isar, ID_AA64DFR0, 0x10305106);
+ SET_IDREG(isar, ID_AA64DFR1, 0);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x00011120);
+ SET_IDREG(isar, ID_AA64ISAR1, 0);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x00101122);
+ SET_IDREG(isar, ID_AA64MMFR1, 0);
+ SET_IDREG(isar, CLIDR, 0x0a200023);
cpu->dcz_blocksize = 4;
/* From B2.4 AArch64 Virtual Memory control registers */
@@ -157,11 +158,8 @@ static bool cpu_arm_get_rme(Object *obj, Error **errp)
static void cpu_arm_set_rme(Object *obj, bool value, Error **errp)
{
ARMCPU *cpu = ARM_CPU(obj);
- uint64_t t;
- t = cpu->isar.id_aa64pfr0;
- t = FIELD_DP64(t, ID_AA64PFR0, RME, value);
- cpu->isar.id_aa64pfr0 = t;
+ FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, RME, value);
}
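cpu_arm_get_rme()/cpu_arm_set_rme() are QOM property accessors, so after this change the setter is a single FIELD_DP64_IDREG() deposit into ID_AA64PFR0.RME. A hedged sketch of how such a pair is typically wired up (the actual registration call for the RME property sits elsewhere in cpu64.c):

/* Hypothetical registration site; QEMU names experimental props "x-...". */
static void add_rme_property_sketch(Object *obj)
{
    object_property_add_bool(obj, "x-rme",
                             cpu_arm_get_rme, cpu_arm_set_rme);
}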
static void cpu_max_set_l0gptsz(Object *obj, Visitor *v, const char *name,
@@ -204,6 +202,7 @@ static const Property arm_cpu_lpa2_property =
static void aarch64_a55_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a55";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -217,34 +216,34 @@ static void aarch64_a55_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_PMU);
/* Ordered by B2.4 AArch64 registers by functional group */
- cpu->clidr = 0x82000023;
+ SET_IDREG(isar, CLIDR, 0x82000023);
cpu->ctr = 0x84448004; /* L1Ip = VIPT */
cpu->dcz_blocksize = 4; /* 64 bytes */
- cpu->isar.id_aa64dfr0 = 0x0000000010305408ull;
- cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
- cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
- cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull;
- cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
- cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
- cpu->isar.id_aa64pfr0 = 0x0000000010112222ull;
- cpu->isar.id_aa64pfr1 = 0x0000000000000010ull;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_dfr0 = 0x04010088;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00011142;
- cpu->isar.id_isar5 = 0x01011121;
- cpu->isar.id_isar6 = 0x00000010;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02122211;
- cpu->isar.id_mmfr4 = 0x00021110;
- cpu->isar.id_pfr0 = 0x10010131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_pfr2 = 0x00000011;
+ SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull);
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101122ull);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
+ SET_IDREG(isar, ID_AA64PFR0, 0x0000000010112222ull);
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_DFR0, 0x04010088);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00011142);
+ SET_IDREG(isar, ID_ISAR5, 0x01011121);
+ SET_IDREG(isar, ID_ISAR6, 0x00000010);
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02122211);
+ SET_IDREG(isar, ID_MMFR4, 0x00021110);
+ SET_IDREG(isar, ID_PFR0, 0x10010131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_PFR2, 0x00000011);
cpu->midr = 0x412FD050; /* r2p0 */
cpu->revidr = 0;
@@ -276,6 +275,7 @@ static void aarch64_a55_initfn(Object *obj)
static void aarch64_a72_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a72";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -295,29 +295,29 @@ static void aarch64_a72_initfn(Object *obj)
cpu->isar.mvfr2 = 0x00000043;
cpu->ctr = 0x8444c004;
cpu->reset_sctlr = 0x00c50838;
- cpu->isar.id_pfr0 = 0x00000131;
- cpu->isar.id_pfr1 = 0x00011011;
- cpu->isar.id_dfr0 = 0x03010066;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02102211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00011142;
- cpu->isar.id_isar5 = 0x00011121;
- cpu->isar.id_aa64pfr0 = 0x00002222;
- cpu->isar.id_aa64dfr0 = 0x10305106;
- cpu->isar.id_aa64isar0 = 0x00011120;
- cpu->isar.id_aa64mmfr0 = 0x00001124;
+ SET_IDREG(isar, ID_PFR0, 0x00000131);
+ SET_IDREG(isar, ID_PFR1, 0x00011011);
+ SET_IDREG(isar, ID_DFR0, 0x03010066);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02102211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00011142);
+ SET_IDREG(isar, ID_ISAR5, 0x00011121);
+ SET_IDREG(isar, ID_AA64PFR0, 0x00002222);
+ SET_IDREG(isar, ID_AA64DFR0, 0x10305106);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x00011120);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x00001124);
cpu->isar.dbgdidr = 0x3516d000;
cpu->isar.dbgdevid = 0x01110f13;
cpu->isar.dbgdevid1 = 0x2;
cpu->isar.reset_pmcr_el0 = 0x41023000;
- cpu->clidr = 0x0a200023;
+ SET_IDREG(isar, CLIDR, 0x0a200023);
/* 32KB L1 dcache */
cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 32 * KiB, 7);
/* 48KB L1 icache */
@@ -335,6 +335,7 @@ static void aarch64_a72_initfn(Object *obj)
static void aarch64_a76_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a76";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -348,34 +349,34 @@ static void aarch64_a76_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_PMU);
/* Ordered by B2.4 AArch64 registers by functional group */
- cpu->clidr = 0x82000023;
+ SET_IDREG(isar, CLIDR, 0x82000023);
cpu->ctr = 0x8444C004;
cpu->dcz_blocksize = 4;
- cpu->isar.id_aa64dfr0 = 0x0000000010305408ull;
- cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
- cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
- cpu->isar.id_aa64mmfr0 = 0x0000000000101122ull;
- cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
- cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
- cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */
- cpu->isar.id_aa64pfr1 = 0x0000000000000010ull;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_dfr0 = 0x04010088;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00010142;
- cpu->isar.id_isar5 = 0x01011121;
- cpu->isar.id_isar6 = 0x00000010;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02122211;
- cpu->isar.id_mmfr4 = 0x00021110;
- cpu->isar.id_pfr0 = 0x10010131;
- cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
- cpu->isar.id_pfr2 = 0x00000011;
+ SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408ull);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull);
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101122ull);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
+ SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_DFR0, 0x04010088);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00010142);
+ SET_IDREG(isar, ID_ISAR5, 0x01011121);
+ SET_IDREG(isar, ID_ISAR6, 0x00000010);
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02122211);
+ SET_IDREG(isar, ID_MMFR4, 0x00021110);
+ SET_IDREG(isar, ID_PFR0, 0x10010131);
+ SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
+ SET_IDREG(isar, ID_PFR2, 0x00000011);
cpu->midr = 0x414fd0b1; /* r4p1 */
cpu->revidr = 0;
@@ -408,6 +409,7 @@ static void aarch64_a76_initfn(Object *obj)
static void aarch64_a64fx_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,a64fx";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -422,19 +424,19 @@ static void aarch64_a64fx_initfn(Object *obj)
cpu->revidr = 0x00000000;
cpu->ctr = 0x86668006;
cpu->reset_sctlr = 0x30000180;
- cpu->isar.id_aa64pfr0 = 0x0000000101111111; /* No RAS Extensions */
- cpu->isar.id_aa64pfr1 = 0x0000000000000000;
- cpu->isar.id_aa64dfr0 = 0x0000000010305408;
- cpu->isar.id_aa64dfr1 = 0x0000000000000000;
- cpu->id_aa64afr0 = 0x0000000000000000;
- cpu->id_aa64afr1 = 0x0000000000000000;
- cpu->isar.id_aa64mmfr0 = 0x0000000000001122;
- cpu->isar.id_aa64mmfr1 = 0x0000000011212100;
- cpu->isar.id_aa64mmfr2 = 0x0000000000001011;
- cpu->isar.id_aa64isar0 = 0x0000000010211120;
- cpu->isar.id_aa64isar1 = 0x0000000000010001;
- cpu->isar.id_aa64zfr0 = 0x0000000000000000;
- cpu->clidr = 0x0000000080000023;
+ SET_IDREG(isar, ID_AA64PFR0, 0x0000000101111111); /* No RAS Extensions */
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000000);
+ SET_IDREG(isar, ID_AA64DFR0, 0x0000000010305408);
+ SET_IDREG(isar, ID_AA64DFR1, 0x0000000000000000);
+ SET_IDREG(isar, ID_AA64AFR0, 0x0000000000000000);
+ SET_IDREG(isar, ID_AA64AFR1, 0x0000000000000000);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000001122);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000011212100);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x0000000010211120);
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000010001);
+ SET_IDREG(isar, ID_AA64ZFR0, 0x0000000000000000);
+ SET_IDREG(isar, CLIDR, 0x0000000080000023);
/* 64KB L1 dcache */
cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 256, 64 * KiB, 7);
/* 64KB L1 icache */
@@ -581,6 +583,7 @@ static void define_neoverse_v1_cp_reginfo(ARMCPU *cpu)
static void aarch64_neoverse_n1_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,neoverse-n1";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -594,34 +597,34 @@ static void aarch64_neoverse_n1_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_PMU);
/* Ordered by B2.4 AArch64 registers by functional group */
- cpu->clidr = 0x82000023;
+ SET_IDREG(isar, CLIDR, 0x82000023);
cpu->ctr = 0x8444c004;
cpu->dcz_blocksize = 4;
- cpu->isar.id_aa64dfr0 = 0x0000000110305408ull;
- cpu->isar.id_aa64isar0 = 0x0000100010211120ull;
- cpu->isar.id_aa64isar1 = 0x0000000000100001ull;
- cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull;
- cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
- cpu->isar.id_aa64mmfr2 = 0x0000000000001011ull;
- cpu->isar.id_aa64pfr0 = 0x1100000010111112ull; /* GIC filled in later */
- cpu->isar.id_aa64pfr1 = 0x0000000000000020ull;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_dfr0 = 0x04010088;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00010142;
- cpu->isar.id_isar5 = 0x01011121;
- cpu->isar.id_isar6 = 0x00000010;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02122211;
- cpu->isar.id_mmfr4 = 0x00021110;
- cpu->isar.id_pfr0 = 0x10010131;
- cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
- cpu->isar.id_pfr2 = 0x00000011;
+ SET_IDREG(isar, ID_AA64DFR0, 0x0000000110305408ull);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x0000100010211120ull);
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0000000000100001ull);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101125ull);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x0000000000001011ull);
+ SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000020ull);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_DFR0, 0x04010088);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00010142);
+ SET_IDREG(isar, ID_ISAR5, 0x01011121);
+ SET_IDREG(isar, ID_ISAR6, 0x00000010);
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02122211);
+ SET_IDREG(isar, ID_MMFR4, 0x00021110);
+ SET_IDREG(isar, ID_PFR0, 0x10010131);
+ SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
+ SET_IDREG(isar, ID_PFR2, 0x00000011);
cpu->midr = 0x414fd0c1; /* r4p1 */
cpu->revidr = 0;
@@ -656,6 +659,7 @@ static void aarch64_neoverse_n1_initfn(Object *obj)
static void aarch64_neoverse_v1_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,neoverse-v1";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -669,37 +673,37 @@ static void aarch64_neoverse_v1_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_PMU);
/* Ordered by 3.2.4 AArch64 registers by functional group */
- cpu->clidr = 0x82000023;
+ SET_IDREG(isar, CLIDR, 0x82000023);
cpu->ctr = 0xb444c004; /* With DIC and IDC set */
cpu->dcz_blocksize = 4;
- cpu->id_aa64afr0 = 0x00000000;
- cpu->id_aa64afr1 = 0x00000000;
- cpu->isar.id_aa64dfr0 = 0x000001f210305519ull;
- cpu->isar.id_aa64dfr1 = 0x00000000;
- cpu->isar.id_aa64isar0 = 0x1011111110212120ull; /* with FEAT_RNG */
- cpu->isar.id_aa64isar1 = 0x0011100001211032ull;
- cpu->isar.id_aa64mmfr0 = 0x0000000000101125ull;
- cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
- cpu->isar.id_aa64mmfr2 = 0x0220011102101011ull;
- cpu->isar.id_aa64pfr0 = 0x1101110120111112ull; /* GIC filled in later */
- cpu->isar.id_aa64pfr1 = 0x0000000000000020ull;
- cpu->id_afr0 = 0x00000000;
- cpu->isar.id_dfr0 = 0x15011099;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00010142;
- cpu->isar.id_isar5 = 0x11011121;
- cpu->isar.id_isar6 = 0x01100111;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02122211;
- cpu->isar.id_mmfr4 = 0x01021110;
- cpu->isar.id_pfr0 = 0x21110131;
- cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
- cpu->isar.id_pfr2 = 0x00000011;
+ SET_IDREG(isar, ID_AA64AFR0, 0x00000000);
+ SET_IDREG(isar, ID_AA64AFR1, 0x00000000);
+ SET_IDREG(isar, ID_AA64DFR0, 0x000001f210305519ull);
+ SET_IDREG(isar, ID_AA64DFR1, 0x00000000);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x1011111110212120ull); /* with FEAT_RNG */
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0011100001211032ull);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101125ull);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x0220011102101011ull);
+ SET_IDREG(isar, ID_AA64PFR0, 0x1101110120111112ull); /* GIC filled in later */
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000020ull);
+ SET_IDREG(isar, ID_AFR0, 0x00000000);
+ SET_IDREG(isar, ID_DFR0, 0x15011099);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00010142);
+ SET_IDREG(isar, ID_ISAR5, 0x11011121);
+ SET_IDREG(isar, ID_ISAR6, 0x01100111);
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02122211);
+ SET_IDREG(isar, ID_MMFR4, 0x01021110);
+ SET_IDREG(isar, ID_PFR0, 0x21110131);
+ SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
+ SET_IDREG(isar, ID_PFR2, 0x00000011);
cpu->midr = 0x411FD402; /* r1p2 */
cpu->revidr = 0;
@@ -735,7 +739,7 @@ static void aarch64_neoverse_v1_initfn(Object *obj)
cpu->isar.mvfr2 = 0x00000043;
/* From 3.7.5 ID_AA64ZFR0_EL1 */
- cpu->isar.id_aa64zfr0 = 0x0000100000100000;
+ SET_IDREG(isar, ID_AA64ZFR0, 0x0000100000100000);
cpu->sve_vq.supported = (1 << 0) /* 128bit */
| (1 << 1); /* 256bit */
@@ -882,6 +886,7 @@ static const ARMCPRegInfo cortex_a710_cp_reginfo[] = {
static void aarch64_a710_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,cortex-a710";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -897,39 +902,39 @@ static void aarch64_a710_initfn(Object *obj)
/* Ordered by Section B.4: AArch64 registers */
cpu->midr = 0x412FD471; /* r2p1 */
cpu->revidr = 0;
- cpu->isar.id_pfr0 = 0x21110131;
- cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
- cpu->isar.id_dfr0 = 0x16011099;
- cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02122211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00010142;
- cpu->isar.id_isar5 = 0x11011121; /* with Crypto */
- cpu->isar.id_mmfr4 = 0x21021110;
- cpu->isar.id_isar6 = 0x01111111;
+ SET_IDREG(isar, ID_PFR0, 0x21110131);
+ SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
+ SET_IDREG(isar, ID_DFR0, 0x16011099);
+ SET_IDREG(isar, ID_AFR0, 0);
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02122211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00010142);
+ SET_IDREG(isar, ID_ISAR5, 0x11011121); /* with Crypto */
+ SET_IDREG(isar, ID_MMFR4, 0x21021110);
+ SET_IDREG(isar, ID_ISAR6, 0x01111111);
cpu->isar.mvfr0 = 0x10110222;
cpu->isar.mvfr1 = 0x13211111;
cpu->isar.mvfr2 = 0x00000043;
- cpu->isar.id_pfr2 = 0x00000011;
- cpu->isar.id_aa64pfr0 = 0x1201111120111112ull; /* GIC filled in later */
- cpu->isar.id_aa64pfr1 = 0x0000000000000221ull;
- cpu->isar.id_aa64zfr0 = 0x0000110100110021ull; /* with Crypto */
- cpu->isar.id_aa64dfr0 = 0x000011f010305619ull;
- cpu->isar.id_aa64dfr1 = 0;
- cpu->id_aa64afr0 = 0;
- cpu->id_aa64afr1 = 0;
- cpu->isar.id_aa64isar0 = 0x0221111110212120ull; /* with Crypto */
- cpu->isar.id_aa64isar1 = 0x0010111101211052ull;
- cpu->isar.id_aa64mmfr0 = 0x0000022200101122ull;
- cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
- cpu->isar.id_aa64mmfr2 = 0x1221011110101011ull;
- cpu->clidr = 0x0000001482000023ull;
+ SET_IDREG(isar, ID_PFR2, 0x00000011);
+ SET_IDREG(isar, ID_AA64PFR0, 0x1201111120111112ull); /* GIC filled in later */
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000221ull);
+ SET_IDREG(isar, ID_AA64ZFR0, 0x0000110100110021ull); /* with Crypto */
+ SET_IDREG(isar, ID_AA64DFR0, 0x000011f010305619ull);
+ SET_IDREG(isar, ID_AA64DFR1, 0);
+ SET_IDREG(isar, ID_AA64AFR0, 0);
+ SET_IDREG(isar, ID_AA64AFR1, 0);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x0221111110212120ull); /* with Crypto */
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0010111101211052ull);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000022200101122ull);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x1221011110101011ull);
+ SET_IDREG(isar, CLIDR, 0x0000001482000023ull);
cpu->gm_blocksize = 4;
cpu->ctr = 0x000000049444c004ull;
cpu->dcz_blocksize = 4;
@@ -983,6 +988,7 @@ static const ARMCPRegInfo neoverse_n2_cp_reginfo[] = {
static void aarch64_neoverse_n2_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
cpu->dtb_compatible = "arm,neoverse-n2";
set_feature(&cpu->env, ARM_FEATURE_V8);
@@ -998,39 +1004,39 @@ static void aarch64_neoverse_n2_initfn(Object *obj)
/* Ordered by Section B.5: AArch64 ID registers */
cpu->midr = 0x410FD493; /* r0p3 */
cpu->revidr = 0;
- cpu->isar.id_pfr0 = 0x21110131;
- cpu->isar.id_pfr1 = 0x00010000; /* GIC filled in later */
- cpu->isar.id_dfr0 = 0x16011099;
- cpu->id_afr0 = 0;
- cpu->isar.id_mmfr0 = 0x10201105;
- cpu->isar.id_mmfr1 = 0x40000000;
- cpu->isar.id_mmfr2 = 0x01260000;
- cpu->isar.id_mmfr3 = 0x02122211;
- cpu->isar.id_isar0 = 0x02101110;
- cpu->isar.id_isar1 = 0x13112111;
- cpu->isar.id_isar2 = 0x21232042;
- cpu->isar.id_isar3 = 0x01112131;
- cpu->isar.id_isar4 = 0x00010142;
- cpu->isar.id_isar5 = 0x11011121; /* with Crypto */
- cpu->isar.id_mmfr4 = 0x01021110;
- cpu->isar.id_isar6 = 0x01111111;
+ SET_IDREG(isar, ID_PFR0, 0x21110131);
+ SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */
+ SET_IDREG(isar, ID_DFR0, 0x16011099);
+ SET_IDREG(isar, ID_AFR0, 0);
+ SET_IDREG(isar, ID_MMFR0, 0x10201105);
+ SET_IDREG(isar, ID_MMFR1, 0x40000000);
+ SET_IDREG(isar, ID_MMFR2, 0x01260000);
+ SET_IDREG(isar, ID_MMFR3, 0x02122211);
+ SET_IDREG(isar, ID_ISAR0, 0x02101110);
+ SET_IDREG(isar, ID_ISAR1, 0x13112111);
+ SET_IDREG(isar, ID_ISAR2, 0x21232042);
+ SET_IDREG(isar, ID_ISAR3, 0x01112131);
+ SET_IDREG(isar, ID_ISAR4, 0x00010142);
+ SET_IDREG(isar, ID_ISAR5, 0x11011121); /* with Crypto */
+ SET_IDREG(isar, ID_MMFR4, 0x01021110);
+ SET_IDREG(isar, ID_ISAR6, 0x01111111);
cpu->isar.mvfr0 = 0x10110222;
cpu->isar.mvfr1 = 0x13211111;
cpu->isar.mvfr2 = 0x00000043;
- cpu->isar.id_pfr2 = 0x00000011;
- cpu->isar.id_aa64pfr0 = 0x1201111120111112ull; /* GIC filled in later */
- cpu->isar.id_aa64pfr1 = 0x0000000000000221ull;
- cpu->isar.id_aa64zfr0 = 0x0000110100110021ull; /* with Crypto */
- cpu->isar.id_aa64dfr0 = 0x000011f210305619ull;
- cpu->isar.id_aa64dfr1 = 0;
- cpu->id_aa64afr0 = 0;
- cpu->id_aa64afr1 = 0;
- cpu->isar.id_aa64isar0 = 0x1221111110212120ull; /* with Crypto and FEAT_RNG */
- cpu->isar.id_aa64isar1 = 0x0011111101211052ull;
- cpu->isar.id_aa64mmfr0 = 0x0000022200101125ull;
- cpu->isar.id_aa64mmfr1 = 0x0000000010212122ull;
- cpu->isar.id_aa64mmfr2 = 0x1221011112101011ull;
- cpu->clidr = 0x0000001482000023ull;
+ SET_IDREG(isar, ID_PFR2, 0x00000011);
+ SET_IDREG(isar, ID_AA64PFR0, 0x1201111120111112ull); /* GIC filled in later */
+ SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000221ull);
+ SET_IDREG(isar, ID_AA64ZFR0, 0x0000110100110021ull); /* with Crypto */
+ SET_IDREG(isar, ID_AA64DFR0, 0x000011f210305619ull);
+ SET_IDREG(isar, ID_AA64DFR1, 0);
+ SET_IDREG(isar, ID_AA64AFR0, 0);
+ SET_IDREG(isar, ID_AA64AFR1, 0);
+ SET_IDREG(isar, ID_AA64ISAR0, 0x1221111110212120ull); /* with Crypto and FEAT_RNG */
+ SET_IDREG(isar, ID_AA64ISAR1, 0x0011111101211052ull);
+ SET_IDREG(isar, ID_AA64MMFR0, 0x0000022200101125ull);
+ SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull);
+ SET_IDREG(isar, ID_AA64MMFR2, 0x1221011112101011ull);
+ SET_IDREG(isar, CLIDR, 0x0000001482000023ull);
cpu->gm_blocksize = 4;
cpu->ctr = 0x00000004b444c004ull;
cpu->dcz_blocksize = 4;
@@ -1083,6 +1089,7 @@ static void aarch64_neoverse_n2_initfn(Object *obj)
void aarch64_max_tcg_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
+ ARMISARegisters *isar = &cpu->isar;
uint64_t t;
uint32_t u;
@@ -1118,10 +1125,10 @@ void aarch64_max_tcg_initfn(Object *obj)
* We're going to set FEAT_S2FWB, which mandates that CLIDR_EL1.{LoUU,LoUIS}
* are zero.
*/
- u = cpu->clidr;
+ u = GET_IDREG(isar, CLIDR);
u = FIELD_DP32(u, CLIDR_EL1, LOUIS, 0);
u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0);
- cpu->clidr = u;
+ SET_IDREG(isar, CLIDR, u);
/*
 * Set CTR_EL0.DIC and IDC to tell the guest it doesn't need to
@@ -1133,7 +1140,7 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, CTR_EL0, DIC, 1);
cpu->ctr = t;
- t = cpu->isar.id_aa64isar0;
+ t = GET_IDREG(isar, ID_AA64ISAR0);
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */
t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* FEAT_SHA512 */
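The CTR_EL0 lines above advertise IDC and DIC so that guests can skip cache maintenance when turning freshly written data into executable code: IDC == 1 removes the need for DC CVAU to the point of unification, and DIC == 1 removes the need for IC IVAU. A sketch of the guest-visible contract (not code from this patch):

/* Sketch: what a guest reads back and what it may then omit. */
static void ctr_effects_sketch(ARMCPU *cpu)
{
    bool idc = FIELD_EX64(cpu->ctr, CTR_EL0, IDC); /* DC CVAU not needed */
    bool dic = FIELD_EX64(cpu->ctr, CTR_EL0, DIC); /* IC IVAU not needed */
    /* With both set, only an ISB remains architecturally required. */
    (void)idc;
    (void)dic;
}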
@@ -1148,9 +1155,9 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* FEAT_FlagM2 */
t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */
t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1); /* FEAT_RNG */
- cpu->isar.id_aa64isar0 = t;
+ SET_IDREG(isar, ID_AA64ISAR0, t);
- t = cpu->isar.id_aa64isar1;
+ t = GET_IDREG(isar, ID_AA64ISAR1);
t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); /* FEAT_DPB2 */
t = FIELD_DP64(t, ID_AA64ISAR1, APA, PauthFeat_FPACCOMBINED);
t = FIELD_DP64(t, ID_AA64ISAR1, API, 1);
@@ -1164,16 +1171,16 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ISAR1, DGH, 1); /* FEAT_DGH */
t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); /* FEAT_I8MM */
t = FIELD_DP64(t, ID_AA64ISAR1, XS, 1); /* FEAT_XS */
- cpu->isar.id_aa64isar1 = t;
+ SET_IDREG(isar, ID_AA64ISAR1, t);
- t = cpu->isar.id_aa64isar2;
+ t = GET_IDREG(isar, ID_AA64ISAR2);
t = FIELD_DP64(t, ID_AA64ISAR2, RPRES, 1); /* FEAT_RPRES */
t = FIELD_DP64(t, ID_AA64ISAR2, MOPS, 1); /* FEAT_MOPS */
t = FIELD_DP64(t, ID_AA64ISAR2, BC, 1); /* FEAT_HBC */
t = FIELD_DP64(t, ID_AA64ISAR2, WFXT, 2); /* FEAT_WFxT */
- cpu->isar.id_aa64isar2 = t;
+ SET_IDREG(isar, ID_AA64ISAR2, t);
- t = cpu->isar.id_aa64pfr0;
+ t = GET_IDREG(isar, ID_AA64PFR0);
t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); /* FEAT_FP16 */
t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); /* FEAT_FP16 */
t = FIELD_DP64(t, ID_AA64PFR0, RAS, 2); /* FEAT_RASv1p1 + FEAT_DoubleFault */
@@ -1182,9 +1189,9 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); /* FEAT_DIT */
t = FIELD_DP64(t, ID_AA64PFR0, CSV2, 3); /* FEAT_CSV2_3 */
t = FIELD_DP64(t, ID_AA64PFR0, CSV3, 1); /* FEAT_CSV3 */
- cpu->isar.id_aa64pfr0 = t;
+ SET_IDREG(isar, ID_AA64PFR0, t);
- t = cpu->isar.id_aa64pfr1;
+ t = GET_IDREG(isar, ID_AA64PFR1);
t = FIELD_DP64(t, ID_AA64PFR1, BT, 1); /* FEAT_BTI */
t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2); /* FEAT_SSBS2 */
/*
@@ -1194,12 +1201,12 @@ void aarch64_max_tcg_initfn(Object *obj)
*/
t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); /* FEAT_MTE3 */
t = FIELD_DP64(t, ID_AA64PFR1, RAS_FRAC, 0); /* FEAT_RASv1p1 + FEAT_DoubleFault */
- t = FIELD_DP64(t, ID_AA64PFR1, SME, 1); /* FEAT_SME */
+ t = FIELD_DP64(t, ID_AA64PFR1, SME, 2); /* FEAT_SME2 */
t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_3 */
t = FIELD_DP64(t, ID_AA64PFR1, NMI, 1); /* FEAT_NMI */
- cpu->isar.id_aa64pfr1 = t;
+ SET_IDREG(isar, ID_AA64PFR1, t);
- t = cpu->isar.id_aa64mmfr0;
+ t = GET_IDREG(isar, ID_AA64MMFR0);
t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 6); /* FEAT_LPA: 52 bits */
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16, 1); /* 16k pages supported */
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN16_2, 2); /* 16k stage2 supported */
@@ -1207,9 +1214,9 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR0, TGRAN4_2, 2); /* 4k stage2 supported */
t = FIELD_DP64(t, ID_AA64MMFR0, FGT, 1); /* FEAT_FGT */
t = FIELD_DP64(t, ID_AA64MMFR0, ECV, 2); /* FEAT_ECV */
- cpu->isar.id_aa64mmfr0 = t;
+ SET_IDREG(isar, ID_AA64MMFR0, t);
- t = cpu->isar.id_aa64mmfr1;
+ t = GET_IDREG(isar, ID_AA64MMFR1);
t = FIELD_DP64(t, ID_AA64MMFR1, HAFDBS, 2); /* FEAT_HAFDBS */
t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */
t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); /* FEAT_VHE */
@@ -1222,9 +1229,9 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR1, AFP, 1); /* FEAT_AFP */
t = FIELD_DP64(t, ID_AA64MMFR1, TIDCP1, 1); /* FEAT_TIDCP1 */
t = FIELD_DP64(t, ID_AA64MMFR1, CMOW, 1); /* FEAT_CMOW */
- cpu->isar.id_aa64mmfr1 = t;
+ SET_IDREG(isar, ID_AA64MMFR1, t);
- t = cpu->isar.id_aa64mmfr2;
+ t = GET_IDREG(isar, ID_AA64MMFR2);
t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* FEAT_TTCNP */
t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); /* FEAT_UAO */
t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1); /* FEAT_IESB */
@@ -1238,39 +1245,43 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */
t = FIELD_DP64(t, ID_AA64MMFR2, EVT, 2); /* FEAT_EVT */
t = FIELD_DP64(t, ID_AA64MMFR2, E0PD, 1); /* FEAT_E0PD */
- cpu->isar.id_aa64mmfr2 = t;
+ SET_IDREG(isar, ID_AA64MMFR2, t);
- t = cpu->isar.id_aa64mmfr3;
- t = FIELD_DP64(t, ID_AA64MMFR3, SPEC_FPACC, 1); /* FEAT_FPACC_SPEC */
- cpu->isar.id_aa64mmfr3 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64MMFR3, SPEC_FPACC, 1); /* FEAT_FPACC_SPEC */
- t = cpu->isar.id_aa64zfr0;
- t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1);
+ t = GET_IDREG(isar, ID_AA64ZFR0);
+ t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 2); /* FEAT_SVE2p1 */
t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* FEAT_SVE_PMULL128 */
t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1); /* FEAT_SVE_BitPerm */
t = FIELD_DP64(t, ID_AA64ZFR0, BFLOAT16, 2); /* FEAT_BF16, FEAT_EBF16 */
+ t = FIELD_DP64(t, ID_AA64ZFR0, B16B16, 1); /* FEAT_SVE_B16B16 */
t = FIELD_DP64(t, ID_AA64ZFR0, SHA3, 1); /* FEAT_SVE_SHA3 */
t = FIELD_DP64(t, ID_AA64ZFR0, SM4, 1); /* FEAT_SVE_SM4 */
t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1); /* FEAT_I8MM */
t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1); /* FEAT_F32MM */
t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1); /* FEAT_F64MM */
- cpu->isar.id_aa64zfr0 = t;
+ SET_IDREG(isar, ID_AA64ZFR0, t);
- t = cpu->isar.id_aa64dfr0;
+ t = GET_IDREG(isar, ID_AA64DFR0);
t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 10); /* FEAT_Debugv8p8 */
t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 6); /* FEAT_PMUv3p5 */
t = FIELD_DP64(t, ID_AA64DFR0, HPMN0, 1); /* FEAT_HPMN0 */
- cpu->isar.id_aa64dfr0 = t;
+ SET_IDREG(isar, ID_AA64DFR0, t);
- t = cpu->isar.id_aa64smfr0;
+ t = GET_IDREG(isar, ID_AA64SMFR0);
t = FIELD_DP64(t, ID_AA64SMFR0, F32F32, 1); /* FEAT_SME */
+ t = FIELD_DP64(t, ID_AA64SMFR0, BI32I32, 1); /* FEAT_SME2 */
t = FIELD_DP64(t, ID_AA64SMFR0, B16F32, 1); /* FEAT_SME */
t = FIELD_DP64(t, ID_AA64SMFR0, F16F32, 1); /* FEAT_SME */
t = FIELD_DP64(t, ID_AA64SMFR0, I8I32, 0xf); /* FEAT_SME */
+ t = FIELD_DP64(t, ID_AA64SMFR0, F16F16, 1); /* FEAT_SME_F16F16 */
+ t = FIELD_DP64(t, ID_AA64SMFR0, B16B16, 1); /* FEAT_SME_B16B16 */
+ t = FIELD_DP64(t, ID_AA64SMFR0, I16I32, 5); /* FEAT_SME2 */
t = FIELD_DP64(t, ID_AA64SMFR0, F64F64, 1); /* FEAT_SME_F64F64 */
t = FIELD_DP64(t, ID_AA64SMFR0, I16I64, 0xf); /* FEAT_SME_I16I64 */
+ t = FIELD_DP64(t, ID_AA64SMFR0, SMEVER, 2); /* FEAT_SME2p1 */
t = FIELD_DP64(t, ID_AA64SMFR0, FA64, 1); /* FEAT_SME_FA64 */
- cpu->isar.id_aa64smfr0 = t;
+ SET_IDREG(isar, ID_AA64SMFR0, t);
/* Replicate the same data to the 32-bit id registers. */
aa32_max_features(cpu);
@@ -1316,7 +1327,7 @@ static void aarch64_cpu_register_types(void)
size_t i;
for (i = 0; i < ARRAY_SIZE(aarch64_cpus); ++i) {
- aarch64_cpu_register(&aarch64_cpus[i]);
+ arm_cpu_register(&aarch64_cpus[i]);
}
}
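
Aside: the SET_IDREG/GET_IDREG/FIELD_DP64_IDREG conversion above assumes the ID registers now live in an array indexed by symbolic constants rather than as individual ARMISARegisters struct fields. A minimal sketch of what such accessors could look like (the real definitions live in cpu-sysregs.h; the names here are illustrative):

/* Hypothetical shape of the array-backed ID register accessors. */
#define GET_IDREG(isar, name)      ((isar)->idregs[name ## _EL1_IDX])
#define SET_IDREG(isar, name, v)   ((isar)->idregs[name ## _EL1_IDX] = (v))
#define FIELD_DP64_IDREG(isar, name, fld, v) \
    SET_IDREG(isar, name, FIELD_DP64(GET_IDREG(isar, name), name, fld, (v)))

One practical effect of this shape is that generic code can iterate over idregs[] instead of naming every register field individually.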
diff --git a/target/arm/tcg/crypto_helper.c b/target/arm/tcg/crypto_helper.c
index 7cadd61..3428bd1 100644
--- a/target/arm/tcg/crypto_helper.c
+++ b/target/arm/tcg/crypto_helper.c
@@ -10,14 +10,16 @@
*/
#include "qemu/osdep.h"
+#include "qemu/bitops.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "crypto/aes-round.h"
#include "crypto/sm4.h"
#include "vec_internal.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
union CRYPTO_STATE {
uint8_t bytes[16];
uint32_t words[4];
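
Aside: the HELPER_H pattern introduced above replaces the blanket "exec/helper-proto.h" include. exec/helper-proto.h.inc re-reads the header named by HELPER_H with the DEF_HELPER_* macros temporarily defined to emit prototypes, so (roughly — a sketch of the mechanism, which also records TCG call-flag metadata) a line like

DEF_HELPER_FLAGS_4(crypto_aese, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)

expands to an ordinary C declaration along the lines of

void helper_crypto_aese(void *, void *, void *, uint32_t);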
diff --git a/target/arm/tcg/gengvec64.c b/target/arm/tcg/gengvec64.c
index 2617cde..2429cab 100644
--- a/target/arm/tcg/gengvec64.c
+++ b/target/arm/tcg/gengvec64.c
@@ -369,3 +369,14 @@ void gen_gvec_usqadd_qc(unsigned vece, uint32_t rd_ofs,
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
}
+
+void gen_gvec_sve2_sqdmulh(unsigned vece, uint32_t rd_ofs,
+ uint32_t rn_ofs, uint32_t rm_ofs,
+ uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_3 * const fns[4] = {
+ gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h,
+ gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d,
+ };
+ tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, 0, fns[vece]);
+}
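
A hedged usage sketch for the new entry point (a hypothetical call site; decode glue omitted): the caller selects the element size via vece (0=B, 1=H, 2=S, 3=D) and passes gvec register offsets:

/* Illustrative only: emit a saturating doubling multiply-high. */
static void example_sqdmulh(DisasContext *s, int rd, int rn, int rm, int vece)
{
    gen_gvec_sve2_sqdmulh(vece,
                          vec_full_reg_offset(s, rd),
                          vec_full_reg_offset(s, rn),
                          vec_full_reg_offset(s, rm),
                          vec_full_reg_size(s), vec_full_reg_size(s));
}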
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index ad3c4f3..71c6c44 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -29,8 +29,11 @@
#include "internals.h"
#include "qemu/crc32c.h"
#include "exec/cpu-common.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/helper-retaddr.h"
+#include "accel/tcg/probe.h"
+#include "exec/target_page.h"
+#include "exec/tlb-flags.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "fpu/softfloat.h"
@@ -399,6 +402,8 @@ AH_MINMAX_HELPER(vfp_ah_mind, float64, float64, min)
AH_MINMAX_HELPER(vfp_ah_maxh, dh_ctype_f16, float16, max)
AH_MINMAX_HELPER(vfp_ah_maxs, float32, float32, max)
AH_MINMAX_HELPER(vfp_ah_maxd, float64, float64, max)
+AH_MINMAX_HELPER(sme2_ah_fmax_b16, bfloat16, bfloat16, max)
+AH_MINMAX_HELPER(sme2_ah_fmin_b16, bfloat16, bfloat16, min)
/* 64-bit versions of the CRC helpers. Note that although the operation
* (and the prototypes of crc32c() and crc32() mean that only the bottom
@@ -653,15 +658,6 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
spsr &= ~PSTATE_SS;
}
- /*
- * FEAT_RME forbids return from EL3 with an invalid security state.
- * We don't need an explicit check for FEAT_RME here because we enforce
- * in scr_write() that you can't set the NSE bit without it.
- */
- if (cur_el == 3 && (env->cp15.scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE) {
- goto illegal_return;
- }
-
new_el = el_from_spsr(spsr);
if (new_el == -1) {
goto illegal_return;
@@ -673,6 +669,17 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
goto illegal_return;
}
+ /*
+ * FEAT_RME forbids return from EL3 to a lower exception level
+ * with an invalid security state.
+ * We don't need an explicit check for FEAT_RME here because we enforce
+ * in scr_write() that you can't set the NSE bit without it.
+ */
+ if (cur_el == 3 && new_el < 3 &&
+ (env->cp15.scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE) {
+ goto illegal_return;
+ }
+
if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
/* Return to an EL which is configured for a different register width */
goto illegal_return;
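
Aside: the relocated check relies on the FEAT_RME encoding of SCR_EL3.{NSE,NS} — {0,0} Secure, {0,1} Non-secure, {1,1} Realm, with {1,0} reserved. A one-line sketch of the predicate being enforced:

/* Illustrative: the security state is valid unless NSE is set without NS. */
static inline bool scr_security_state_valid(uint64_t scr_el3)
{
    return (scr_el3 & (SCR_NS | SCR_NSE)) != SCR_NSE;
}

Moving the check below the el_from_spsr() computation means an EL3-to-EL3 return is no longer rejected on this ground, matching the updated comment: the constraint applies only when dropping to a lower exception level.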
diff --git a/target/arm/tcg/helper-sme.h b/target/arm/tcg/helper-sme.h
index 858d691..1fc756b 100644
--- a/target/arm/tcg/helper-sme.h
+++ b/target/arm/tcg/helper-sme.h
@@ -33,6 +33,21 @@ DEF_HELPER_FLAGS_4(sme_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sme_mova_cz_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sme_mova_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_mova_cz_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_mova_zc_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_mova_cz_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_mova_zc_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_mova_cz_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_mova_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_mova_cz_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_mova_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2p1_movaz_zc_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2p1_movaz_zc_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2p1_movaz_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2p1_movaz_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2p1_movaz_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32)
@@ -120,14 +135,45 @@ DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sme_addha_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sme_addva_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
-DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG,
+DEF_HELPER_FLAGS_7(sme_fmopa_w_h, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_7(sme_fmopa_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sme_fmopa_s, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_7(sme_fmopa_d, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sme_bfmopa_w, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, env, i32)
DEF_HELPER_FLAGS_7(sme_bfmopa, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_7(sme_fmops_w_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_7(sme_fmops_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sme_fmops_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sme_fmops_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sme_bfmops_w, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_7(sme_bfmops, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_7(sme_ah_fmops_w_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_7(sme_ah_fmops_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sme_ah_fmops_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sme_ah_fmops_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_7(sme_ah_bfmops_w, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_7(sme_ah_bfmops, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, fpst, i32)
+
DEF_HELPER_FLAGS_6(sme_smopa_s, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_6(sme_umopa_s, TCG_CALL_NO_RWG,
@@ -144,3 +190,168 @@ DEF_HELPER_FLAGS_6(sme_sumopa_d, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_6(sme_usmopa_d, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sme2_bmopa_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme2_smopa2_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_6(sme2_umopa2_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmax_b16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmin_b16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fmax_b16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fmin_b16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnum_b16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnum_b16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(sme2_fdot_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(sme2_fdot_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(sme2_fvdot_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(sme2_svdot_idx_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_uvdot_idx_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_suvdot_idx_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_usvdot_idx_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sme2_svdot_idx_4h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_uvdot_idx_4h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sme2_svdot_idx_2h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_uvdot_idx_2h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sme2_smlall_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_smlall_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_smlsll_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_smlsll_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_umlall_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_umlall_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_umlsll_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_umlsll_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_usmlall_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sme2_smlall_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_smlall_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_smlsll_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_smlsll_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_umlall_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_umlall_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_umlsll_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_umlsll_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_usmlall_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sme2_sumlall_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sme2_bfcvt, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sme2_bfcvtn, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sme2_fcvt_n, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sme2_fcvtn, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sme2_fcvt_w, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sme2_fcvtl, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sme2_scvtf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(sme2_ucvtf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_3(sme2_sqcvt_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqcvt_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtu_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtu_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvt_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqcvt_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtu_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2_sqcvtn_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqcvtn_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtun_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtn_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqcvtn_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtun_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtn_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqcvtn_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqcvtun_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2_sunpk2_bh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sunpk2_hs, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sunpk2_sd, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sunpk4_bh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sunpk4_hs, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sunpk4_sd, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uunpk2_bh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uunpk2_hs, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uunpk2_sd, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uunpk4_bh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uunpk4_hs, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uunpk4_sd, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sme2_zip2_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_zip2_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_zip2_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_zip2_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_zip2_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sme2_uzp2_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_uzp2_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_uzp2_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_uzp2_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_uzp2_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2_zip4_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_zip4_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_zip4_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_zip4_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_zip4_q, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2_uzp4_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uzp4_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uzp4_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uzp4_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uzp4_q, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2_sqrshr_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqrshr_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshru_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshr_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqrshr_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshru_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshr_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqrshr_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshru_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(sme2_sqrshrn_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqrshrn_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshrun_sh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshrn_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqrshrn_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshrun_sb, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshrn_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_uqrshrn_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(sme2_sqrshrun_dh, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sme2_sclamp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_sclamp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_sclamp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_sclamp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sme2_uclamp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_uclamp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_uclamp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_uclamp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sme2_fclamp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sme2_fclamp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sme2_fclamp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sme2_bfclamp, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(sme2_sel_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32, i32)
+DEF_HELPER_FLAGS_5(sme2_sel_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32, i32)
+DEF_HELPER_FLAGS_5(sme2_sel_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32, i32)
+DEF_HELPER_FLAGS_5(sme2_sel_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32, i32)
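
Aside: note the split visible in the outer-product declarations above — the widening variants (_w_h, bfmopa_w, and so on) take env, while the non-widening ones take fpst. In prototype terms that is roughly (a sketch inferred from the DEF_HELPER type lists; parameter names are illustrative):

/* Widening fp16->fp32 outer product: takes the whole CPU state. */
void helper_sme_fmopa_w_h(void *za, void *zn, void *zm, void *pn, void *pm,
                          CPUARMState *env, uint32_t desc);
/* Non-widening fp16 outer product: a single float_status pointer. */
void helper_sme_fmopa_h(void *za, void *zn, void *zm, void *pn, void *pm,
                        float_status *fpst, uint32_t desc);

Plausibly the widening forms need env because they reach more than one float status.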
diff --git a/target/arm/tcg/helper-sve.h b/target/arm/tcg/helper-sve.h
index 0b1b588..c36090d 100644
--- a/target/arm/tcg/helper-sve.h
+++ b/target/arm/tcg/helper-sve.h
@@ -676,11 +676,21 @@ DEF_HELPER_FLAGS_5(sve2_tbl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve2_tbl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_5(sve2_tbl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_tblq_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_tblq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_tblq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_tblq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(sve2_tbx_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_tbx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_tbx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_tbx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_tbxq_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_tbxq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_tbxq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_tbxq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_3(sve_sunpk_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve_sunpk_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve_sunpk_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
@@ -701,12 +711,22 @@ DEF_HELPER_FLAGS_4(sve_zip_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_zip_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_zip_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_zipq_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_zipq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_zipq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_zipq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(sve_uzp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_uzp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_uzp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_uzp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_uzp_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_uzpq_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_uzpq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_uzpq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_uzpq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(sve_trn_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_trn_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_trn_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
@@ -937,10 +957,17 @@ DEF_HELPER_FLAGS_4(sve_brkn, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve_brkns, TCG_CALL_NO_RWG, i32, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_3(sve_cntp, TCG_CALL_NO_RWG, i64, ptr, ptr, i32)
+DEF_HELPER_FLAGS_2(sve2p1_cntp_c, TCG_CALL_NO_RWG_SE, i64, i32, i32)
DEF_HELPER_FLAGS_3(sve_whilel, TCG_CALL_NO_RWG, i32, ptr, i32, i32)
DEF_HELPER_FLAGS_3(sve_whileg, TCG_CALL_NO_RWG, i32, ptr, i32, i32)
+DEF_HELPER_FLAGS_3(sve_while2l, TCG_CALL_NO_RWG, i32, ptr, i32, i32)
+DEF_HELPER_FLAGS_3(sve_while2g, TCG_CALL_NO_RWG, i32, ptr, i32, i32)
+
+DEF_HELPER_FLAGS_3(sve_whilecl, TCG_CALL_NO_RWG, i32, ptr, i32, i32)
+DEF_HELPER_FLAGS_3(sve_whilecg, TCG_CALL_NO_RWG, i32, ptr, i32, i32)
+
DEF_HELPER_FLAGS_4(sve_subri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(sve_subri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
DEF_HELPER_FLAGS_4(sve_subri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
@@ -1071,6 +1098,55 @@ DEF_HELPER_FLAGS_4(sve_ah_fminv_s, TCG_CALL_NO_RWG,
DEF_HELPER_FLAGS_4(sve_ah_fminv_d, TCG_CALL_NO_RWG,
i64, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_faddqv_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_faddqv_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_faddqv_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(sve2p1_fmaxnmqv_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_fmaxnmqv_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_fmaxnmqv_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(sve2p1_fminnmqv_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_fminnmqv_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_fminnmqv_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(sve2p1_fmaxqv_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_fmaxqv_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_fmaxqv_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(sve2p1_fminqv_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_fminqv_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_fminqv_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(sve2p1_ah_fmaxqv_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_ah_fmaxqv_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_ah_fmaxqv_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(sve2p1_ah_fminqv_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_ah_fminqv_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(sve2p1_ah_fminqv_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
DEF_HELPER_FLAGS_5(sve_fadda_h, TCG_CALL_NO_RWG,
i64, i64, ptr, ptr, fpst, i32)
DEF_HELPER_FLAGS_5(sve_fadda_s, TCG_CALL_NO_RWG,
@@ -1582,6 +1658,14 @@ DEF_HELPER_FLAGS_4(sve_ld2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld2qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
DEF_HELPER_FLAGS_4(sve_ld1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
@@ -1602,9 +1686,15 @@ DEF_HELPER_FLAGS_4(sve_ld1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1squ_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1dqu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1squ_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1dqu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
DEF_HELPER_FLAGS_4(sve_ld1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld2bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld3bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
@@ -1640,6 +1730,14 @@ DEF_HELPER_FLAGS_4(sve_ld2dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld3dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld4dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld2qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_ld2qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld3qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld4qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
DEF_HELPER_FLAGS_4(sve_ld1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
@@ -1660,9 +1758,15 @@ DEF_HELPER_FLAGS_4(sve_ld1hds_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1sds_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1squ_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1dqu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ld1sds_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1squ_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_ld1dqu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
DEF_HELPER_FLAGS_4(sve_ldff1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_ldff1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
@@ -1858,6 +1962,14 @@ DEF_HELPER_FLAGS_4(sve_st2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st2qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
DEF_HELPER_FLAGS_4(sve_st1bh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1bs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1bd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
@@ -1870,6 +1982,11 @@ DEF_HELPER_FLAGS_4(sve_st1hd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1sd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1sd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1sq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1sq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1dq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1dq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
DEF_HELPER_FLAGS_4(sve_st1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st2bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st3bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
@@ -1905,6 +2022,14 @@ DEF_HELPER_FLAGS_4(sve_st2dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st3dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st4dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st2qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
+DEF_HELPER_FLAGS_4(sve_st2qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st3qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st4qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
DEF_HELPER_FLAGS_4(sve_st1bh_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1bs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1bd_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
@@ -1917,6 +2042,11 @@ DEF_HELPER_FLAGS_4(sve_st1hd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1sd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve_st1sd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1sq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1sq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1dq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+DEF_HELPER_FLAGS_4(sve_st1dq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32)
+
DEF_HELPER_FLAGS_6(sve_ldbsu_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldhsu_le_zsu, TCG_CALL_NO_WG,
@@ -2025,6 +2155,10 @@ DEF_HELPER_FLAGS_6(sve_ldsds_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsds_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldqq_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldqq_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldbsu_zsu_mte, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
@@ -2134,6 +2268,10 @@ DEF_HELPER_FLAGS_6(sve_ldsds_le_zd_mte, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldsds_be_zd_mte, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldqq_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_ldqq_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_ldffbsu_zsu, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
@@ -2419,6 +2557,10 @@ DEF_HELPER_FLAGS_6(sve_stdd_le_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stdd_be_zd, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stqq_le_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stqq_be_zd, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stbs_zsu_mte, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
@@ -2486,6 +2628,10 @@ DEF_HELPER_FLAGS_6(sve_stdd_le_zd_mte, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_6(sve_stdd_be_zd_mte, TCG_CALL_NO_WG,
void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stqq_le_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
+DEF_HELPER_FLAGS_6(sve_stqq_be_zd_mte, TCG_CALL_NO_WG,
+ void, env, ptr, ptr, ptr, tl, i32)
DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_h, TCG_CALL_NO_RWG,
void, ptr, ptr, ptr, i32)
@@ -2922,3 +3068,69 @@ DEF_HELPER_FLAGS_4(sve2_sqshlu_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_sqshlu_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_sqshlu_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(sve2_sqshlu_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_addqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_addqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_addqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_addqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_smaxqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_smaxqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_smaxqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_smaxqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_sminqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_sminqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_sminqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_sminqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_umaxqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_umaxqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_umaxqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_umaxqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_uminqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_uminqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_uminqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_uminqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(pext, TCG_CALL_NO_RWG, void, ptr, i32, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_orqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_orqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_orqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_orqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_eorqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_eorqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_eorqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_eorqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2p1_andqv_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_andqv_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_andqv_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2p1_andqv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(pmov_pv_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(pmov_pv_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(pmov_pv_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(pmov_vp_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(pmov_vp_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(pmov_vp_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2p1_ld1bb_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
+DEF_HELPER_FLAGS_5(sve2p1_ld1hh_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
+DEF_HELPER_FLAGS_5(sve2p1_ld1hh_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
+DEF_HELPER_FLAGS_5(sve2p1_ld1ss_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
+DEF_HELPER_FLAGS_5(sve2p1_ld1ss_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
+DEF_HELPER_FLAGS_5(sve2p1_ld1dd_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
+DEF_HELPER_FLAGS_5(sve2p1_ld1dd_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
+
+DEF_HELPER_FLAGS_5(sve2p1_st1bb_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
+DEF_HELPER_FLAGS_5(sve2p1_st1hh_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
+DEF_HELPER_FLAGS_5(sve2p1_st1hh_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
+DEF_HELPER_FLAGS_5(sve2p1_st1ss_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
+DEF_HELPER_FLAGS_5(sve2p1_st1ss_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
+DEF_HELPER_FLAGS_5(sve2p1_st1dd_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
+DEF_HELPER_FLAGS_5(sve2p1_st1dd_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32)
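
Aside: all of these DEF_HELPER_FLAGS_n lines are declaration macros whose type letters map mechanically to C types (ptr -> void *, i32 -> uint32_t, tl -> target_ulong, env -> CPUARMState *). For example, sve2p1_addqv_b above expands to approximately:

void helper_sve2p1_addqv_b(void *vd, void *vn, void *vg, uint32_t desc);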
diff --git a/target/arm/tcg/helper.h b/target/arm/tcg/helper.h
new file mode 100644
index 0000000..0a006d9
--- /dev/null
+++ b/target/arm/tcg/helper.h
@@ -0,0 +1,1213 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+
+DEF_HELPER_3(add_setq, i32, env, i32, i32)
+DEF_HELPER_3(add_saturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
+DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(sdiv, TCG_CALL_NO_RWG, s32, env, s32, s32)
+DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
+
+#define PAS_OP(pfx) \
+ DEF_HELPER_3(pfx ## add8, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## sub8, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## sub16, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## add16, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## addsubx, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## subaddx, i32, i32, i32, ptr)
+
+PAS_OP(s)
+PAS_OP(u)
+#undef PAS_OP
+
+#define PAS_OP(pfx) \
+ DEF_HELPER_2(pfx ## add8, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## sub8, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## sub16, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## add16, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## addsubx, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## subaddx, i32, i32, i32)
+PAS_OP(q)
+PAS_OP(sh)
+PAS_OP(uq)
+PAS_OP(uh)
+#undef PAS_OP
+
+DEF_HELPER_3(ssat, i32, env, i32, i32)
+DEF_HELPER_3(usat, i32, env, i32, i32)
+DEF_HELPER_3(ssat16, i32, env, i32, i32)
+DEF_HELPER_3(usat16, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+
+DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
+ i32, i32, i32, i32)
+DEF_HELPER_2(exception_internal, noreturn, env, i32)
+DEF_HELPER_3(exception_with_syndrome, noreturn, env, i32, i32)
+DEF_HELPER_4(exception_with_syndrome_el, noreturn, env, i32, i32, i32)
+DEF_HELPER_2(exception_bkpt_insn, noreturn, env, i32)
+DEF_HELPER_2(exception_swstep, noreturn, env, i32)
+DEF_HELPER_2(exception_pc_alignment, noreturn, env, vaddr)
+DEF_HELPER_1(setend, void, env)
+DEF_HELPER_2(wfi, void, env, i32)
+DEF_HELPER_1(wfe, void, env)
+DEF_HELPER_2(wfit, void, env, i64)
+DEF_HELPER_1(yield, void, env)
+DEF_HELPER_1(pre_hvc, void, env)
+DEF_HELPER_2(pre_smc, void, env, i32)
+DEF_HELPER_1(vesb, void, env)
+
+DEF_HELPER_3(cpsr_write, void, env, i32, i32)
+DEF_HELPER_2(cpsr_write_eret, void, env, i32)
+DEF_HELPER_1(cpsr_read, i32, env)
+
+DEF_HELPER_3(v7m_msr, void, env, i32, i32)
+DEF_HELPER_2(v7m_mrs, i32, env, i32)
+
+DEF_HELPER_2(v7m_bxns, void, env, i32)
+DEF_HELPER_2(v7m_blxns, void, env, i32)
+
+DEF_HELPER_3(v7m_tt, i32, env, i32, i32)
+
+DEF_HELPER_1(v7m_preserve_fp_state, void, env)
+
+DEF_HELPER_2(v7m_vlstm, void, env, i32)
+DEF_HELPER_2(v7m_vlldm, void, env, i32)
+
+DEF_HELPER_2(v8m_stackcheck, void, env, i32)
+
+DEF_HELPER_FLAGS_2(check_bxj_trap, TCG_CALL_NO_WG, void, env, i32)
+
+DEF_HELPER_4(access_check_cp_reg, cptr, env, i32, i32, i32)
+DEF_HELPER_FLAGS_2(lookup_cp_reg, TCG_CALL_NO_RWG_SE, cptr, env, i32)
+DEF_HELPER_FLAGS_2(tidcp_el0, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_2(tidcp_el1, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_3(set_cp_reg, void, env, cptr, i32)
+DEF_HELPER_2(get_cp_reg, i32, env, cptr)
+DEF_HELPER_3(set_cp_reg64, void, env, cptr, i64)
+DEF_HELPER_2(get_cp_reg64, i64, env, cptr)
+
+DEF_HELPER_2(get_r13_banked, i32, env, i32)
+DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
+
+DEF_HELPER_3(mrs_banked, i32, env, i32, i32)
+DEF_HELPER_4(msr_banked, void, env, i32, i32, i32)
+
+DEF_HELPER_2(get_user_reg, i32, env, i32)
+DEF_HELPER_3(set_user_reg, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_1(rebuild_hflags_m32_newel, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int)
+DEF_HELPER_FLAGS_1(rebuild_hflags_a32_newel, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int)
+DEF_HELPER_FLAGS_2(rebuild_hflags_a64, TCG_CALL_NO_RWG, void, env, int)
+
+DEF_HELPER_FLAGS_5(probe_access, TCG_CALL_NO_WG, void, env, vaddr, i32, i32, i32)
+
+DEF_HELPER_1(vfp_get_fpscr, i32, env)
+DEF_HELPER_2(vfp_set_fpscr, void, env, i32)
+
+DEF_HELPER_3(vfp_addh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_adds, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_addd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_subh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_subs, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_subd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_mulh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_muls, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_muld, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_divh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_divs, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_divd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_maxh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_maxs, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_maxd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_minh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_mins, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_mind, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_maxnumh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_maxnums, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, fpst)
+DEF_HELPER_3(vfp_minnumh, f16, f16, f16, fpst)
+DEF_HELPER_3(vfp_minnums, f32, f32, f32, fpst)
+DEF_HELPER_3(vfp_minnumd, f64, f64, f64, fpst)
+DEF_HELPER_2(vfp_sqrth, f16, f16, fpst)
+DEF_HELPER_2(vfp_sqrts, f32, f32, fpst)
+DEF_HELPER_2(vfp_sqrtd, f64, f64, fpst)
+DEF_HELPER_3(vfp_cmph, void, f16, f16, env)
+DEF_HELPER_3(vfp_cmps, void, f32, f32, env)
+DEF_HELPER_3(vfp_cmpd, void, f64, f64, env)
+DEF_HELPER_3(vfp_cmpeh, void, f16, f16, env)
+DEF_HELPER_3(vfp_cmpes, void, f32, f32, env)
+DEF_HELPER_3(vfp_cmped, void, f64, f64, env)
+
+DEF_HELPER_2(vfp_fcvtds, f64, f32, fpst)
+DEF_HELPER_2(vfp_fcvtsd, f32, f64, fpst)
+DEF_HELPER_FLAGS_2(bfcvt, TCG_CALL_NO_RWG, i32, f32, fpst)
+DEF_HELPER_FLAGS_2(bfcvt_pair, TCG_CALL_NO_RWG, i32, i64, fpst)
+
+DEF_HELPER_2(vfp_uitoh, f16, i32, fpst)
+DEF_HELPER_2(vfp_uitos, f32, i32, fpst)
+DEF_HELPER_2(vfp_uitod, f64, i32, fpst)
+DEF_HELPER_2(vfp_sitoh, f16, i32, fpst)
+DEF_HELPER_2(vfp_sitos, f32, i32, fpst)
+DEF_HELPER_2(vfp_sitod, f64, i32, fpst)
+
+DEF_HELPER_2(vfp_touih, i32, f16, fpst)
+DEF_HELPER_2(vfp_touis, i32, f32, fpst)
+DEF_HELPER_2(vfp_touid, i32, f64, fpst)
+DEF_HELPER_2(vfp_touizh, i32, f16, fpst)
+DEF_HELPER_2(vfp_touizs, i32, f32, fpst)
+DEF_HELPER_2(vfp_touizd, i32, f64, fpst)
+DEF_HELPER_2(vfp_tosih, s32, f16, fpst)
+DEF_HELPER_2(vfp_tosis, s32, f32, fpst)
+DEF_HELPER_2(vfp_tosid, s32, f64, fpst)
+DEF_HELPER_2(vfp_tosizh, s32, f16, fpst)
+DEF_HELPER_2(vfp_tosizs, s32, f32, fpst)
+DEF_HELPER_2(vfp_tosizd, s32, f64, fpst)
+
+DEF_HELPER_3(vfp_toshh_round_to_zero, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toslh_round_to_zero, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_touhh_round_to_zero, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toulh_round_to_zero, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tosqd_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touqd_round_to_zero, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touhh, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toshh, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toulh, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_toslh, i32, f16, i32, fpst)
+DEF_HELPER_3(vfp_touqh, i64, f16, i32, fpst)
+DEF_HELPER_3(vfp_tosqh, i64, f16, i32, fpst)
+DEF_HELPER_3(vfp_toshs, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_tosls, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_tosqs, i64, f32, i32, fpst)
+DEF_HELPER_3(vfp_touhs, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_touls, i32, f32, i32, fpst)
+DEF_HELPER_3(vfp_touqs, i64, f32, i32, fpst)
+DEF_HELPER_3(vfp_toshd, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tosld, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tosqd, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touhd, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_tould, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_touqd, i64, f64, i32, fpst)
+DEF_HELPER_3(vfp_shtos, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_sltos, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_sqtos, f32, i64, i32, fpst)
+DEF_HELPER_3(vfp_uhtos, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_ultos, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_uqtos, f32, i64, i32, fpst)
+DEF_HELPER_3(vfp_shtod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_sltod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_sqtod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_uhtod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_ultod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_uqtod, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_shtoh, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_uhtoh, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_sltoh, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_ultoh, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_sqtoh, f16, i64, i32, fpst)
+DEF_HELPER_3(vfp_uqtoh, f16, i64, i32, fpst)
+
+DEF_HELPER_3(vfp_shtos_round_to_nearest, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_sltos_round_to_nearest, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_uhtos_round_to_nearest, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_ultos_round_to_nearest, f32, i32, i32, fpst)
+DEF_HELPER_3(vfp_shtod_round_to_nearest, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_sltod_round_to_nearest, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_uhtod_round_to_nearest, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_ultod_round_to_nearest, f64, i64, i32, fpst)
+DEF_HELPER_3(vfp_shtoh_round_to_nearest, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_uhtoh_round_to_nearest, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_sltoh_round_to_nearest, f16, i32, i32, fpst)
+DEF_HELPER_3(vfp_ultoh_round_to_nearest, f16, i32, i32, fpst)
+
+DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, fpst)
+
+DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f32, TCG_CALL_NO_RWG, f32, f16, fpst, i32)
+DEF_HELPER_FLAGS_3(vfp_fcvt_f32_to_f16, TCG_CALL_NO_RWG, f16, f32, fpst, i32)
+DEF_HELPER_FLAGS_3(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, f16, fpst, i32)
+DEF_HELPER_FLAGS_3(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, f16, f64, fpst, i32)
+
+DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, fpst)
+DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, fpst)
+DEF_HELPER_4(vfp_muladdh, f16, f16, f16, f16, fpst)
+
+DEF_HELPER_FLAGS_2(recpe_f16, TCG_CALL_NO_RWG, f16, f16, fpst)
+DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(recpe_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, fpst)
+DEF_HELPER_FLAGS_2(rsqrte_f16, TCG_CALL_NO_RWG, f16, f16, fpst)
+DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(rsqrte_rpres_f32, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, fpst)
+DEF_HELPER_FLAGS_1(recpe_u32, TCG_CALL_NO_RWG, i32, i32)
+DEF_HELPER_FLAGS_1(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32)
+DEF_HELPER_FLAGS_4(neon_tbl, TCG_CALL_NO_RWG, i64, env, i32, i64, i64)
+
+DEF_HELPER_3(shl_cc, i32, env, i32, i32)
+DEF_HELPER_3(shr_cc, i32, env, i32, i32)
+DEF_HELPER_3(sar_cc, i32, env, i32, i32)
+DEF_HELPER_3(ror_cc, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(rinth_exact, TCG_CALL_NO_RWG, f16, f16, fpst)
+DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, fpst)
+DEF_HELPER_FLAGS_2(rinth, TCG_CALL_NO_RWG, f16, f16, fpst)
+DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, fpst)
+
+DEF_HELPER_FLAGS_2(vjcvt, TCG_CALL_NO_RWG, i32, f64, env)
+DEF_HELPER_FLAGS_2(fjcvtzs, TCG_CALL_NO_RWG, i64, f64, fpst)
+
+DEF_HELPER_FLAGS_3(check_hcr_el2_trap, TCG_CALL_NO_WG, void, env, i32, i32)
+
+/* neon_helper.c */
+DEF_HELPER_2(neon_pmin_u8, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_s8, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_u16, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_s16, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_u8, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_s8, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_u16, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_s16, i32, i32, i32)
+
+DEF_HELPER_2(neon_shl_u16, i32, i32, i32)
+DEF_HELPER_2(neon_shl_s16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u8, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s8, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u32, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s32, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u64, i64, i64, i64)
+DEF_HELPER_2(neon_rshl_s64, i64, i64, i64)
+DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(neon_sqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_sqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(neon_uqrshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshli_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshli_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshli_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshli_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_uqshli_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_uqshli_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_uqshli_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_uqshli_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshlui_b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshlui_h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshlui_s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(neon_sqshlui_d, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(gvec_srshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_srshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_srshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_srshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_urshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_urshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_urshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_urshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sme2_srshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_srshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_srshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sme2_urshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_urshl_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sme2_urshl_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_2(neon_add_u8, i32, i32, i32)
+DEF_HELPER_2(neon_add_u16, i32, i32, i32)
+DEF_HELPER_2(neon_sub_u8, i32, i32, i32)
+DEF_HELPER_2(neon_sub_u16, i32, i32, i32)
+DEF_HELPER_2(neon_mul_u8, i32, i32, i32)
+DEF_HELPER_2(neon_mul_u16, i32, i32, i32)
+
+DEF_HELPER_2(neon_tst_u8, i32, i32, i32)
+DEF_HELPER_2(neon_tst_u16, i32, i32, i32)
+DEF_HELPER_2(neon_tst_u32, i32, i32, i32)
+
+DEF_HELPER_1(neon_clz_u8, i32, i32)
+DEF_HELPER_1(neon_clz_u16, i32, i32)
+DEF_HELPER_1(neon_cls_s8, i32, i32)
+DEF_HELPER_1(neon_cls_s16, i32, i32)
+DEF_HELPER_1(neon_cls_s32, i32, i32)
+DEF_HELPER_FLAGS_3(gvec_cnt_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_rbit_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_4(neon_qrdmlah_s16, i32, env, i32, i32, i32)
+DEF_HELPER_4(neon_qrdmlsh_s16, i32, env, i32, i32, i32)
+DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32)
+DEF_HELPER_4(neon_qrdmlah_s32, i32, env, s32, s32, s32)
+DEF_HELPER_4(neon_qrdmlsh_s32, i32, env, s32, s32, s32)
+
+DEF_HELPER_1(neon_narrow_u8, i64, i64)
+DEF_HELPER_1(neon_narrow_u16, i64, i64)
+DEF_HELPER_2(neon_unarrow_sat8, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u8, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s8, i64, env, i64)
+DEF_HELPER_2(neon_unarrow_sat16, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u16, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s16, i64, env, i64)
+DEF_HELPER_2(neon_unarrow_sat32, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u32, i64, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s32, i64, env, i64)
+DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_high_u16, i32, i64)
+DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64)
+DEF_HELPER_1(neon_widen_u8, i64, i32)
+DEF_HELPER_1(neon_widen_s8, i64, i32)
+DEF_HELPER_1(neon_widen_u16, i64, i32)
+DEF_HELPER_1(neon_widen_s16, i64, i32)
+
+DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64)
+DEF_HELPER_2(neon_abdl_u16, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s16, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_u32, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s32, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_u64, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s64, i64, i32, i32)
+DEF_HELPER_2(neon_mull_u8, i64, i32, i32)
+DEF_HELPER_2(neon_mull_s8, i64, i32, i32)
+DEF_HELPER_2(neon_mull_u16, i64, i32, i32)
+DEF_HELPER_2(neon_mull_s16, i64, i32, i32)
+
+DEF_HELPER_1(neon_negl_u16, i64, i64)
+DEF_HELPER_1(neon_negl_u32, i64, i64)
+
+DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_cge_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_acge_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, fpst)
+DEF_HELPER_3(neon_acge_f64, i64, i64, i64, fpst)
+DEF_HELPER_3(neon_acgt_f64, i64, i64, i64, fpst)
+
+/* iwmmxt_helper.c */
+DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64)
+DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64)
+
+#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \
+DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \
+
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackl)
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackh)
+
+DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(mins)
+DEF_IWMMXT_HELPER_SIZE_ENV(minu)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxs)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxu)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(subn)
+DEF_IWMMXT_HELPER_SIZE_ENV(addn)
+DEF_IWMMXT_HELPER_SIZE_ENV(subu)
+DEF_IWMMXT_HELPER_SIZE_ENV(addu)
+DEF_IWMMXT_HELPER_SIZE_ENV(subs)
+DEF_IWMMXT_HELPER_SIZE_ENV(adds)
+
+DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64)
+
+DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32)
+DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32)
+
+DEF_HELPER_1(iwmmxt_bcstb, i64, i32)
+DEF_HELPER_1(iwmmxt_bcstw, i64, i32)
+DEF_HELPER_1(iwmmxt_bcstl, i64, i32)
+
+DEF_HELPER_1(iwmmxt_addcb, i64, i64)
+DEF_HELPER_1(iwmmxt_addcw, i64, i64)
+DEF_HELPER_1(iwmmxt_addcl, i64, i64)
+
+DEF_HELPER_1(iwmmxt_msbb, i32, i64)
+DEF_HELPER_1(iwmmxt_msbw, i32, i64)
+DEF_HELPER_1(iwmmxt_msbl, i32, i64)
+
+DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32)
+
+DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64)
+
+DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32)
+DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32)
+DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32)
+
+DEF_HELPER_FLAGS_2(neon_unzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_unzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qunzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qunzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qunzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_zip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_zip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qzip8, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qzip16, TCG_CALL_NO_RWG, void, ptr, ptr)
+DEF_HELPER_FLAGS_2(neon_qzip32, TCG_CALL_NO_RWG, void, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(crypto_aese, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_aesd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_aesmc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_aesimc, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sha1su0, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha1c, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha1p, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha1m, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha1h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha1su1, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sha256h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha256h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha256su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha256su1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sha512h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha512h2, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(crypto_sha512su0, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sha512su1, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sm3tt1a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3tt1b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3tt2a, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3tt2b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3partw1, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm3partw2, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_sm4e, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(crypto_sm4ekey, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(crypto_rax1, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+
+DEF_HELPER_FLAGS_5(gvec_qrdmlah_s16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s16, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_qrdmlah_s32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlah_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdmlsh_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_sdot_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sdot_4h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_4h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usdot_4b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_sdot_2h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_2h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_sdot_idx_4b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_idx_4b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sdot_idx_4h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_idx_4h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sudot_idx_4b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usdot_idx_4b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_sdot_idx_2h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_udot_idx_2h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcaddh, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcadds, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcaddd, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(gvec_fcmlah, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlah_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlas_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fcmlad, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sstoh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_sitos, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_ustoh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_uitos, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_tosszh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_tosizs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_touszh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_touizs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_sf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_uf, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_fs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_fu, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_sh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_uh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_hs, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_hu, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_sd, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_ud, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_ds, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rz_du, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sd, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ud, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_ss, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_us, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_sh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vcvt_rm_uh, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vrint_rm_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vrint_rm_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_vrintx_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_vrintx_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_frecpe_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frecpe_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frecpe_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_frsqrte_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frsqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frsqrte_rpres_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_frsqrte_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcgt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcgt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcge0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcge0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fceq0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fceq0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fcle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcle0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fcle0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_fclt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fclt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_4(gvec_fclt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_bfadd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_bfsub, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ah_fabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_fabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fceq_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fceq_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fceq_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fcgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fcgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_facge_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_facge_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_facge_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_facgt_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_facgt_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_facgt_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmax_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmin_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmaxnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnum_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_recps_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_recps_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_rsqrts_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmla_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmla_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmls_nf_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmls_nf_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_vfma_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_vfma_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_vfma_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_bfmla, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_bfmls, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ah_vfms_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_vfms_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_vfms_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ah_bfmls, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_ftsmul_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_ftsmul_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmul_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmul_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmla_nf_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmls_nf_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(gvec_fmla_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fmla_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fmla_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_bfmla_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(gvec_fmls_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fmls_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_fmls_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_bfmls_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_ah_fmls_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_ah_bfmls_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_uqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uqsub_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sqsub_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_suqadd_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmlal_a32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_fmlal_a64, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a32, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_5(gvec_fmlal_idx_a64, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_2(frint32_s, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, fpst)
+DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, fpst)
+DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, fpst)
+
+DEF_HELPER_FLAGS_3(gvec_ceq0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ceq0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_clt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_clt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cle0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cle0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cgt0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cgt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cge0_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_cge0_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_smulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_umulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_ssra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ssra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_usra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_usra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_srshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_urshr_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_urshr_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_srsra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_srsra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_ursra_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursra_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_sri_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sri_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_sli_b, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_sli_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_uabd_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uabd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uabd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uabd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_saba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_saba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_saba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_saba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_uaba_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uaba_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uaba_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uaba_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_mul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_mul_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_mla_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mla_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mla_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_mls_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mls_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mls_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqdmulh_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqdmulh_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmlah_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(neon_sqrdmlsh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqdmulh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(sve2_sqrdmulh_idx_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzzw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(sve2_fmlal_zzxw_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(gvec_xar_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_smmla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_ummla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_usmmla_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_6(gvec_bfdot, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(gvec_bfdot_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_6(sme2_bfvdot_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_6(gvec_bfmmla, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_6(gvec_bfmlal, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_bfmlsl, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_ah_bfmlsl, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_bfmlal_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_bfmlsl_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_6(gvec_ah_bfmlsl_idx, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_sclamp_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_sclamp_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_uclamp_b, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_h, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_s, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_uclamp_d, TCG_CALL_NO_RWG,
+ void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_faddp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_faddp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_faddp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fmaxnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fmaxnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_5(gvec_fminnump_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnump_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+DEF_HELPER_FLAGS_5(gvec_fminnump_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32)
+
+DEF_HELPER_FLAGS_4(gvec_addp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_addp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_addp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_addp_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_smaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_smaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_sminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_sminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_umaxp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umaxp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_umaxp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(gvec_uminp_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uminp_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(gvec_uminp_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_3(gvec_urecpe_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+DEF_HELPER_FLAGS_3(gvec_ursqrte_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(sme2_luti2_1b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(sme2_luti2_1h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(sme2_luti2_1s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(sme2_luti2_2b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(sme2_luti2_2h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(sme2_luti2_2s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(sme2_luti2_4b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(sme2_luti2_4h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(sme2_luti2_4s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(sme2_luti4_1b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(sme2_luti4_1h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(sme2_luti4_1s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(sme2_luti4_2b, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(sme2_luti4_2h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(sme2_luti4_2s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+
+DEF_HELPER_FLAGS_4(sme2_luti4_4h, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
+DEF_HELPER_FLAGS_4(sme2_luti4_4s, TCG_CALL_NO_RWG, void, ptr, ptr, env, i32)
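Each DEF_HELPER_* line above is a registry entry: once a consumer selects this list and includes the generator header, the entry yields both a C prototype and a TCG call emitter. A minimal sketch of what one entry turns into, assuming QEMU's usual dh_ctype mapping (f32 -> float32, fpst -> float_status *); the expansion shown is illustrative, not verbatim:

    /* registry entry in tcg/helper.h: */
    DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, fpst)

    /* exec/helper-proto.h.inc derives, roughly: */
    float32 helper_recpe_f32(float32 input, float_status *fpst);

    /* and translate-*.c calls it through the generated emitter: */
    gen_helper_recpe_f32(tcg_dest, tcg_src, fpstatus);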
diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c
index 8d79b8b..59ab526 100644
--- a/target/arm/tcg/hflags.c
+++ b/target/arm/tcg/hflags.c
@@ -9,9 +9,13 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/helper-proto.h"
+#include "exec/translation-block.h"
+#include "accel/tcg/cpu-ops.h"
#include "cpregs.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
static inline bool fgt_svc(CPUARMState *env, int el)
{
/*
@@ -210,6 +214,31 @@ static CPUARMTBFlags rebuild_hflags_a32(CPUARMState *env, int fp_el,
return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
}
+/*
+ * Return the exception level to which exceptions should be taken for ZT0.
+ * C.f. the ARM pseudocode function CheckSMEZT0Enabled, after the ZA check.
+ */
+static int zt0_exception_el(CPUARMState *env, int el)
+{
+#ifndef CONFIG_USER_ONLY
+ if (el <= 1
+ && !el_is_in_host(env, el)
+ && !FIELD_EX64(env->vfp.smcr_el[1], SMCR, EZT0)) {
+ return 1;
+ }
+ if (el <= 2
+ && arm_is_el2_enabled(env)
+ && !FIELD_EX64(env->vfp.smcr_el[2], SMCR, EZT0)) {
+ return 2;
+ }
+ if (arm_feature(env, ARM_FEATURE_EL3)
+ && !FIELD_EX64(env->vfp.smcr_el[3], SMCR, EZT0)) {
+ return 3;
+ }
+#endif
+ return 0;
+}
+
static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
ARMMMUIdx mmu_idx)
{
@@ -265,7 +294,14 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
DP_TBFLAG_A64(flags, PSTATE_SM, 1);
DP_TBFLAG_A64(flags, SME_TRAP_NONSTREAMING, !sme_fa64(env, el));
}
- DP_TBFLAG_A64(flags, PSTATE_ZA, FIELD_EX64(env->svcr, SVCR, ZA));
+
+ if (FIELD_EX64(env->svcr, SVCR, ZA)) {
+ DP_TBFLAG_A64(flags, PSTATE_ZA, 1);
+ if (cpu_isar_feature(aa64_sme2, env_archcpu(env))) {
+ int zt0_el = zt0_exception_el(env, el);
+ DP_TBFLAG_A64(flags, ZT0EXC_EL, zt0_el);
+ }
+ }
}
sctlr = regime_sctlr(env, stage1);
@@ -498,7 +534,7 @@ void HELPER(rebuild_hflags_a64)(CPUARMState *env, int el)
env->hflags = rebuild_hflags_a64(env, el, fp_el, mmu_idx);
}
-void assert_hflags_rebuild_correctly(CPUARMState *env)
+static void assert_hflags_rebuild_correctly(CPUARMState *env)
{
#ifdef CONFIG_DEBUG_TCG
CPUARMTBFlags c = env->hflags;
@@ -506,10 +542,123 @@ void assert_hflags_rebuild_correctly(CPUARMState *env)
if (unlikely(c.flags != r.flags || c.flags2 != r.flags2)) {
fprintf(stderr, "TCG hflags mismatch "
- "(current:(0x%08x,0x" TARGET_FMT_lx ")"
- " rebuilt:(0x%08x,0x" TARGET_FMT_lx ")\n",
+ "(current:(0x%08x,0x%016" PRIx64 ")"
+ " rebuilt:(0x%08x,0x%016" PRIx64 ")\n",
c.flags, c.flags2, r.flags, r.flags2);
abort();
}
#endif
}
+
+static bool mve_no_pred(CPUARMState *env)
+{
+ /*
+ * Return true if there is definitely no predication of MVE
+ * instructions by VPR or LTPSIZE. (Returning false even if there
+ * isn't any predication is OK; generated code will just be
+ * a little worse.)
+ * If the CPU does not implement MVE then this TB flag is always 0.
+ *
+ * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
+ * logic in gen_update_fp_context() needs to be updated to match.
+ *
+ * We do not include the effect of the ECI bits here -- they are
+ * tracked in other TB flags. This simplifies the logic for
+ * "when did we emit code that changes the MVE_NO_PRED TB flag
+ * and thus need to end the TB?".
+ */
+    if (!cpu_isar_feature(aa32_mve, env_archcpu(env))) {
+ return false;
+ }
+ if (env->v7m.vpr) {
+ return false;
+ }
+ if (env->v7m.ltpsize < 4) {
+ return false;
+ }
+ return true;
+}
+
+TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs)
+{
+ CPUARMState *env = cpu_env(cs);
+ CPUARMTBFlags flags;
+ vaddr pc;
+
+ assert_hflags_rebuild_correctly(env);
+ flags = env->hflags;
+
+ if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
+ pc = env->pc;
+ if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
+ DP_TBFLAG_A64(flags, BTYPE, env->btype);
+ }
+ } else {
+ pc = env->regs[15];
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
+ != env->v7m.secure) {
+ DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
+ }
+
+ if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
+ (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
+ (env->v7m.secure &&
+ !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
+ /*
+ * ASPEN is set, but FPCA/SFPA indicate that there is no
+ * active FP context; we must create a new FP context before
+ * executing any FP insn.
+ */
+ DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
+ }
+
+ bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+ if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
+ DP_TBFLAG_M32(flags, LSPACT, 1);
+ }
+
+ if (mve_no_pred(env)) {
+ DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
+ }
+ } else {
+ /*
+ * Note that XSCALE_CPAR shares bits with VECSTRIDE.
+ * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
+ */
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
+ } else {
+ DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
+ DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
+ }
+ if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
+ DP_TBFLAG_A32(flags, VFPEN, 1);
+ }
+ }
+
+ DP_TBFLAG_AM32(flags, THUMB, env->thumb);
+ DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
+ }
+
+ /*
+ * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
+ * states defined in the ARM ARM for software singlestep:
+ * SS_ACTIVE PSTATE.SS State
+ * 0 x Inactive (the TB flag for SS is always 0)
+ * 1 0 Active-pending
+ * 1 1 Active-not-pending
+ * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
+ */
+ if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
+ DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
+ }
+
+ return (TCGTBCPUState){
+ .pc = pc,
+ .flags = flags.flags,
+ .cs_base = flags.flags2,
+ };
+}
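arm_get_tb_cpu_state() now returns a TCGTBCPUState and, given the new accel/tcg/cpu-ops.h include, is presumably registered as the accel hook rather than exposed through cpu.h. A hedged sketch of that wiring; the actual ops struct lives elsewhere (e.g. cpu.c) and names more hooks:

    /* illustrative TCGCPUOps registration, assuming the field name
     * from accel/tcg/cpu-ops.h: */
    static const TCGCPUOps arm_tcg_ops = {
        .get_tb_cpu_state = arm_get_tb_cpu_state,
        /* ... remaining hooks ... */
    };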
diff --git a/target/arm/tcg/iwmmxt_helper.c b/target/arm/tcg/iwmmxt_helper.c
index 610b1b2..ba054b6 100644
--- a/target/arm/tcg/iwmmxt_helper.c
+++ b/target/arm/tcg/iwmmxt_helper.c
@@ -22,7 +22,9 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/helper-proto.h"
+
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
/* iwMMXt macros extracted from GNU gdb. */
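The two-line include pattern above (the same one added to hflags.c) is the per-file idiom that replaces a blanket #include of exec/helper-proto.h; a sketch, assuming tcg/helper.h is the list these files want:

    /* select the DEF_HELPER_* list, then run the generator over it;
     * prototypes are emitted into this translation unit only. */
    #define HELPER_H "tcg/helper.h"
    #include "exec/helper-proto.h.inc"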
diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
index f7354f3..28307b5 100644
--- a/target/arm/tcg/m_helper.c
+++ b/target/arm/tcg/m_helper.c
@@ -15,10 +15,9 @@
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#ifdef CONFIG_TCG
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "semihosting/common-semi.h"
#endif
#if !defined(CONFIG_USER_ONLY)
@@ -633,8 +632,11 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
}
/* Note that these stores can throw exceptions on MPU faults */
- cpu_stl_data_ra(env, sp, nextinst, GETPC());
- cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN,
+ arm_to_core_mmu_idx(mmu_idx));
+ cpu_stl_mmu(env, sp, nextinst, oi, GETPC());
+ cpu_stl_mmu(env, sp + 4, saved_psr, oi, GETPC());
env->regs[13] = sp;
env->regs[14] = 0xfeffffff;
@@ -1049,6 +1051,9 @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
uintptr_t ra = GETPC();
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN,
+ arm_to_core_mmu_idx(mmu_idx));
assert(env->v7m.secure);
@@ -1074,7 +1079,7 @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
* Note that we do not use v7m_stack_write() here, because the
* accesses should not set the FSR bits for stacking errors if they
* fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
- * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
+ * or AccType_LAZYFP). Faults in cpu_stl_mmu() will throw exceptions
* and longjmp out.
*/
if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
@@ -1090,12 +1095,12 @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
if (i >= 16) {
faddr += 8; /* skip the slot for the FPSCR */
}
- cpu_stl_data_ra(env, faddr, slo, ra);
- cpu_stl_data_ra(env, faddr + 4, shi, ra);
+ cpu_stl_mmu(env, faddr, slo, oi, ra);
+ cpu_stl_mmu(env, faddr + 4, shi, oi, ra);
}
- cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
+ cpu_stl_mmu(env, fptr + 0x40, vfp_get_fpscr(env), oi, ra);
if (cpu_isar_feature(aa32_mve, cpu)) {
- cpu_stl_data_ra(env, fptr + 0x44, env->v7m.vpr, ra);
+ cpu_stl_mmu(env, fptr + 0x44, env->v7m.vpr, oi, ra);
}
/*
@@ -1122,6 +1127,9 @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
{
ARMCPU *cpu = env_archcpu(env);
uintptr_t ra = GETPC();
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN,
+ arm_to_core_mmu_idx(mmu_idx));
/* fptr is the value of Rn, the frame pointer we load the FP regs from */
assert(env->v7m.secure);
@@ -1156,16 +1164,16 @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
faddr += 8; /* skip the slot for the FPSCR and VPR */
}
- slo = cpu_ldl_data_ra(env, faddr, ra);
- shi = cpu_ldl_data_ra(env, faddr + 4, ra);
+ slo = cpu_ldl_mmu(env, faddr, oi, ra);
+ shi = cpu_ldl_mmu(env, faddr + 4, oi, ra);
dn = (uint64_t) shi << 32 | slo;
*aa32_vfp_dreg(env, i / 2) = dn;
}
- fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
+ fpscr = cpu_ldl_mmu(env, fptr + 0x40, oi, ra);
vfp_set_fpscr(env, fpscr);
if (cpu_isar_feature(aa32_mve, cpu)) {
- env->v7m.vpr = cpu_ldl_data_ra(env, fptr + 0x44, ra);
+ env->v7m.vpr = cpu_ldl_mmu(env, fptr + 0x44, oi, ra);
}
}
@@ -1938,7 +1946,7 @@ static bool do_v7m_function_return(ARMCPU *cpu)
* do them as secure, so work out what MMU index that is.
*/
mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
- oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
+ oi = make_memop_idx(MO_LEUL | MO_ALIGN, arm_to_core_mmu_idx(mmu_idx));
newpc = cpu_ldl_mmu(env, frameptr, oi, 0);
newpsr = cpu_ldl_mmu(env, frameptr + 4, oi, 0);
diff --git a/target/arm/tcg/meson.build b/target/arm/tcg/meson.build
index dd12cce..895facd 100644
--- a/target/arm/tcg/meson.build
+++ b/target/arm/tcg/meson.build
@@ -30,18 +30,10 @@ arm_ss.add(files(
'translate-mve.c',
'translate-neon.c',
'translate-vfp.c',
- 'crypto_helper.c',
- 'hflags.c',
- 'iwmmxt_helper.c',
'm_helper.c',
'mve_helper.c',
- 'neon_helper.c',
'op_helper.c',
- 'tlb_helper.c',
'vec_helper.c',
- 'tlb-insns.c',
- 'arith_helper.c',
- 'vfp_helper.c',
))
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
@@ -63,3 +55,27 @@ arm_system_ss.add(files(
arm_system_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('cpu-v7m.c'))
arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files('cpu-v7m.c'))
+
+arm_common_ss.add(zlib)
+
+arm_common_ss.add(files(
+ 'arith_helper.c',
+ 'crypto_helper.c',
+))
+
+arm_common_system_ss.add(files(
+ 'cpregs-at.c',
+ 'hflags.c',
+ 'iwmmxt_helper.c',
+ 'neon_helper.c',
+ 'tlb_helper.c',
+ 'tlb-insns.c',
+ 'vfp_helper.c',
+))
+arm_user_ss.add(files(
+ 'hflags.c',
+ 'iwmmxt_helper.c',
+ 'neon_helper.c',
+ 'tlb_helper.c',
+ 'vfp_helper.c',
+))
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 5d6d8a1..0efc18a 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -21,22 +21,22 @@
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#ifdef CONFIG_USER_ONLY
#include "user/cpu_loop.h"
#include "user/page-protection.h"
#else
-#include "exec/ram_addr.h"
+#include "system/ram_addr.h"
#endif
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "exec/helper-proto.h"
+#include "exec/tlb-flags.h"
#include "accel/tcg/cpu-ops.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#include "mte_helper.h"
-
static int choose_nonexcluded_tag(int tag, int offset, uint16_t exclude)
{
if (exclude == 0xffff) {
@@ -62,6 +62,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
bool probe, uintptr_t ra)
{
#ifdef CONFIG_USER_ONLY
+ const size_t page_data_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1);
uint64_t clean_ptr = useronly_clean_ptr(ptr);
int flags = page_get_flags(clean_ptr);
uint8_t *tags;
@@ -82,7 +83,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
return NULL;
}
- tags = page_get_target_data(clean_ptr);
+ tags = page_get_target_data(clean_ptr, page_data_size);
index = extract32(ptr, LOG2_TAG_GRANULE + 1,
TARGET_PAGE_BITS - LOG2_TAG_GRANULE - 1);
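The new page_data_size argument makes the per-page tag storage size explicit at the page_get_target_data() call site rather than implicit in the allocator. A quick worked calculation, assuming QEMU's 16-byte MTE tag granule (LOG2_TAG_GRANULE == 4) and a 4KiB page for concreteness:

/*
 * One 4-bit allocation tag covers a 16-byte granule, so each byte of
 * tag storage covers two granules, i.e. 32 bytes of guest data:
 *
 *   page_data_size = TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1)
 *                  = 4096 >> 5
 *                  = 128 bytes of tag storage per page
 */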
diff --git a/target/arm/tcg/mve_helper.c b/target/arm/tcg/mve_helper.c
index 274003e..63ddcf3 100644
--- a/target/arm/tcg/mve_helper.c
+++ b/target/arm/tcg/mve_helper.c
@@ -22,8 +22,7 @@
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-ldst.h"
#include "tcg/tcg.h"
#include "fpu/softfloat.h"
#include "crypto/clmul.h"
@@ -149,13 +148,15 @@ static void mve_advance_vpt(CPUARMState *env)
}
/* For loads, predicated lanes are zeroed instead of keeping their old values */
-#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE) \
+#define DO_VLDR(OP, MFLAG, MSIZE, MTYPE, LDTYPE, ESIZE, TYPE) \
void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
{ \
TYPE *d = vd; \
uint16_t mask = mve_element_mask(env); \
uint16_t eci_mask = mve_eci_mask(env); \
unsigned b, e; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \
/* \
* R_SXTM allows the dest reg to become UNKNOWN for abandoned \
* beats so we don't care if we update part of the dest and \
@@ -164,46 +165,48 @@ static void mve_advance_vpt(CPUARMState *env)
for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
if (eci_mask & (1 << b)) { \
d[H##ESIZE(e)] = (mask & (1 << b)) ? \
- cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \
+ (MTYPE)cpu_##LDTYPE##_mmu(env, addr, oi, GETPC()) : 0;\
} \
addr += MSIZE; \
} \
mve_advance_vpt(env); \
}
-#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE) \
+#define DO_VSTR(OP, MFLAG, MSIZE, STTYPE, ESIZE, TYPE) \
void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
{ \
TYPE *d = vd; \
uint16_t mask = mve_element_mask(env); \
unsigned b, e; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \
for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
if (mask & (1 << b)) { \
- cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
+ cpu_##STTYPE##_mmu(env, addr, d[H##ESIZE(e)], oi, GETPC()); \
} \
addr += MSIZE; \
} \
mve_advance_vpt(env); \
}
-DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
-DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
-DO_VLDR(vldrw, 4, ldl, 4, uint32_t)
+DO_VLDR(vldrb, MO_UB, 1, uint8_t, ldb, 1, uint8_t)
+DO_VLDR(vldrh, MO_TEUW, 2, uint16_t, ldw, 2, uint16_t)
+DO_VLDR(vldrw, MO_TEUL, 4, uint32_t, ldl, 4, uint32_t)
-DO_VSTR(vstrb, 1, stb, 1, uint8_t)
-DO_VSTR(vstrh, 2, stw, 2, uint16_t)
-DO_VSTR(vstrw, 4, stl, 4, uint32_t)
+DO_VSTR(vstrb, MO_UB, 1, stb, 1, uint8_t)
+DO_VSTR(vstrh, MO_TEUW, 2, stw, 2, uint16_t)
+DO_VSTR(vstrw, MO_TEUL, 4, stl, 4, uint32_t)
-DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
-DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
-DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
-DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
-DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
-DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)
+DO_VLDR(vldrb_sh, MO_SB, 1, int8_t, ldb, 2, int16_t)
+DO_VLDR(vldrb_sw, MO_SB, 1, int8_t, ldb, 4, int32_t)
+DO_VLDR(vldrb_uh, MO_UB, 1, uint8_t, ldb, 2, uint16_t)
+DO_VLDR(vldrb_uw, MO_UB, 1, uint8_t, ldb, 4, uint32_t)
+DO_VLDR(vldrh_sw, MO_TESW, 2, int16_t, ldw, 4, int32_t)
+DO_VLDR(vldrh_uw, MO_TEUW, 2, uint16_t, ldw, 4, uint32_t)
-DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
-DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
-DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
+DO_VSTR(vstrb_h, MO_UB, 1, stb, 2, int16_t)
+DO_VSTR(vstrb_w, MO_UB, 1, stb, 4, int32_t)
+DO_VSTR(vstrh_w, MO_TEUW, 2, stw, 4, int32_t)
#undef DO_VLDR
#undef DO_VSTR
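Because the new MFLAG/MTYPE plumbing is easy to lose in the macro soup, here is a hand expansion of one instance under the new scheme (a sketch of what the macro generates; the _sketch suffix is mine, the real symbol comes from HELPER()):

/* Expansion of DO_VLDR(vldrb_sh, MO_SB, 1, int8_t, ldb, 2, int16_t). */
void helper_mve_vldrb_sh_sketch(CPUARMState *env, void *vd, uint32_t addr)
{
    int16_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    uint16_t eci_mask = mve_eci_mask(env);
    unsigned b, e;
    int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env));
    MemOpIdx oi = make_memop_idx(MO_SB | MO_ALIGN, mmu_idx);

    for (b = 0, e = 0; b < 16; b += 2, e++) {   /* ESIZE == 2 */
        if (eci_mask & (1 << b)) {
            /* The (int8_t) cast supplies the sign extension that
             * cpu_ldsb_data_ra() used to perform internally. */
            d[H2(e)] = (mask & (1 << b))
                       ? (int8_t)cpu_ldb_mmu(env, addr, oi, GETPC()) : 0;
        }
        addr += 1;                              /* MSIZE == 1 */
    }
    mve_advance_vpt(env);
}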
@@ -215,7 +218,7 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
* For loads, predicated lanes are zeroed instead of retaining
* their previous values.
*/
-#define DO_VLDR_SG(OP, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB) \
+#define DO_VLDR_SG(OP, MFLAG, MTYPE, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB)\
void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
uint32_t base) \
{ \
@@ -225,13 +228,15 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
uint16_t eci_mask = mve_eci_mask(env); \
unsigned e; \
uint32_t addr; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \
for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
if (!(eci_mask & 1)) { \
continue; \
} \
addr = ADDRFN(base, m[H##ESIZE(e)]); \
d[H##ESIZE(e)] = (mask & 1) ? \
- cpu_##LDTYPE##_data_ra(env, addr, GETPC()) : 0; \
+ (MTYPE)cpu_##LDTYPE##_mmu(env, addr, oi, GETPC()) : 0; \
if (WB) { \
m[H##ESIZE(e)] = addr; \
} \
@@ -240,7 +245,7 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
}
/* We know here TYPE is unsigned so always the same as the offset type */
-#define DO_VSTR_SG(OP, STTYPE, ESIZE, TYPE, ADDRFN, WB) \
+#define DO_VSTR_SG(OP, MFLAG, STTYPE, ESIZE, TYPE, ADDRFN, WB) \
void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
uint32_t base) \
{ \
@@ -250,13 +255,15 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
uint16_t eci_mask = mve_eci_mask(env); \
unsigned e; \
uint32_t addr; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \
for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
if (!(eci_mask & 1)) { \
continue; \
} \
addr = ADDRFN(base, m[H##ESIZE(e)]); \
if (mask & 1) { \
- cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
+ cpu_##STTYPE##_mmu(env, addr, d[H##ESIZE(e)], oi, GETPC()); \
} \
if (WB) { \
m[H##ESIZE(e)] = addr; \
@@ -283,13 +290,15 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
uint16_t eci_mask = mve_eci_mask(env); \
unsigned e; \
uint32_t addr; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \
if (!(eci_mask & 1)) { \
continue; \
} \
addr = ADDRFN(base, m[H4(e & ~1)]); \
addr += 4 * (e & 1); \
- d[H4(e)] = (mask & 1) ? cpu_ldl_data_ra(env, addr, GETPC()) : 0; \
+ d[H4(e)] = (mask & 1) ? cpu_ldl_mmu(env, addr, oi, GETPC()) : 0; \
if (WB && (e & 1)) { \
m[H4(e & ~1)] = addr - 4; \
} \
@@ -307,6 +316,8 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
uint16_t eci_mask = mve_eci_mask(env); \
unsigned e; \
uint32_t addr; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \
if (!(eci_mask & 1)) { \
continue; \
@@ -314,7 +325,7 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
addr = ADDRFN(base, m[H4(e & ~1)]); \
addr += 4 * (e & 1); \
if (mask & 1) { \
- cpu_stl_data_ra(env, addr, d[H4(e)], GETPC()); \
+ cpu_stl_mmu(env, addr, d[H4(e)], oi, GETPC()); \
} \
if (WB && (e & 1)) { \
m[H4(e & ~1)] = addr - 4; \
@@ -328,40 +339,44 @@ DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
#define ADDR_ADD_OSW(BASE, OFFSET) ((BASE) + ((OFFSET) << 2))
#define ADDR_ADD_OSD(BASE, OFFSET) ((BASE) + ((OFFSET) << 3))
-DO_VLDR_SG(vldrb_sg_sh, ldsb, 2, int16_t, uint16_t, ADDR_ADD, false)
-DO_VLDR_SG(vldrb_sg_sw, ldsb, 4, int32_t, uint32_t, ADDR_ADD, false)
-DO_VLDR_SG(vldrh_sg_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrb_sg_sh, MO_SB, int8_t, ldb, 2, int16_t, uint16_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrb_sg_sw, MO_SB, int8_t, ldb, 4, int32_t, uint32_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrh_sg_sw, MO_TESW, int16_t, ldw, 4, int32_t, uint32_t, ADDR_ADD, false)
-DO_VLDR_SG(vldrb_sg_ub, ldub, 1, uint8_t, uint8_t, ADDR_ADD, false)
-DO_VLDR_SG(vldrb_sg_uh, ldub, 2, uint16_t, uint16_t, ADDR_ADD, false)
-DO_VLDR_SG(vldrb_sg_uw, ldub, 4, uint32_t, uint32_t, ADDR_ADD, false)
-DO_VLDR_SG(vldrh_sg_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD, false)
-DO_VLDR_SG(vldrh_sg_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD, false)
-DO_VLDR_SG(vldrw_sg_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrb_sg_ub, MO_UB, uint8_t, ldb, 1, uint8_t, uint8_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrb_sg_uh, MO_UB, uint8_t, ldb, 2, uint16_t, uint16_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrb_sg_uw, MO_UB, uint8_t, ldb, 4, uint32_t, uint32_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrh_sg_uh, MO_TEUW, uint16_t, ldw, 2, uint16_t, uint16_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrh_sg_uw, MO_TEUW, uint16_t, ldw, 4, uint32_t, uint32_t, ADDR_ADD, false)
+DO_VLDR_SG(vldrw_sg_uw, MO_TEUL, uint32_t, ldl, 4, uint32_t, uint32_t, ADDR_ADD, false)
DO_VLDR64_SG(vldrd_sg_ud, ADDR_ADD, false)
-DO_VLDR_SG(vldrh_sg_os_sw, ldsw, 4, int32_t, uint32_t, ADDR_ADD_OSH, false)
-DO_VLDR_SG(vldrh_sg_os_uh, lduw, 2, uint16_t, uint16_t, ADDR_ADD_OSH, false)
-DO_VLDR_SG(vldrh_sg_os_uw, lduw, 4, uint32_t, uint32_t, ADDR_ADD_OSH, false)
-DO_VLDR_SG(vldrw_sg_os_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD_OSW, false)
+DO_VLDR_SG(vldrh_sg_os_sw, MO_TESW, int16_t, ldw, 4,
+ int32_t, uint32_t, ADDR_ADD_OSH, false)
+DO_VLDR_SG(vldrh_sg_os_uh, MO_TEUW, uint16_t, ldw, 2,
+ uint16_t, uint16_t, ADDR_ADD_OSH, false)
+DO_VLDR_SG(vldrh_sg_os_uw, MO_TEUW, uint16_t, ldw, 4,
+ uint32_t, uint32_t, ADDR_ADD_OSH, false)
+DO_VLDR_SG(vldrw_sg_os_uw, MO_TEUL, uint32_t, ldl, 4,
+ uint32_t, uint32_t, ADDR_ADD_OSW, false)
DO_VLDR64_SG(vldrd_sg_os_ud, ADDR_ADD_OSD, false)
-DO_VSTR_SG(vstrb_sg_ub, stb, 1, uint8_t, ADDR_ADD, false)
-DO_VSTR_SG(vstrb_sg_uh, stb, 2, uint16_t, ADDR_ADD, false)
-DO_VSTR_SG(vstrb_sg_uw, stb, 4, uint32_t, ADDR_ADD, false)
-DO_VSTR_SG(vstrh_sg_uh, stw, 2, uint16_t, ADDR_ADD, false)
-DO_VSTR_SG(vstrh_sg_uw, stw, 4, uint32_t, ADDR_ADD, false)
-DO_VSTR_SG(vstrw_sg_uw, stl, 4, uint32_t, ADDR_ADD, false)
+DO_VSTR_SG(vstrb_sg_ub, MO_UB, stb, 1, uint8_t, ADDR_ADD, false)
+DO_VSTR_SG(vstrb_sg_uh, MO_UB, stb, 2, uint16_t, ADDR_ADD, false)
+DO_VSTR_SG(vstrb_sg_uw, MO_UB, stb, 4, uint32_t, ADDR_ADD, false)
+DO_VSTR_SG(vstrh_sg_uh, MO_TEUW, stw, 2, uint16_t, ADDR_ADD, false)
+DO_VSTR_SG(vstrh_sg_uw, MO_TEUW, stw, 4, uint32_t, ADDR_ADD, false)
+DO_VSTR_SG(vstrw_sg_uw, MO_TEUL, stl, 4, uint32_t, ADDR_ADD, false)
DO_VSTR64_SG(vstrd_sg_ud, ADDR_ADD, false)
-DO_VSTR_SG(vstrh_sg_os_uh, stw, 2, uint16_t, ADDR_ADD_OSH, false)
-DO_VSTR_SG(vstrh_sg_os_uw, stw, 4, uint32_t, ADDR_ADD_OSH, false)
-DO_VSTR_SG(vstrw_sg_os_uw, stl, 4, uint32_t, ADDR_ADD_OSW, false)
+DO_VSTR_SG(vstrh_sg_os_uh, MO_TEUW, stw, 2, uint16_t, ADDR_ADD_OSH, false)
+DO_VSTR_SG(vstrh_sg_os_uw, MO_TEUW, stw, 4, uint32_t, ADDR_ADD_OSH, false)
+DO_VSTR_SG(vstrw_sg_os_uw, MO_TEUL, stl, 4, uint32_t, ADDR_ADD_OSW, false)
DO_VSTR64_SG(vstrd_sg_os_ud, ADDR_ADD_OSD, false)
-DO_VLDR_SG(vldrw_sg_wb_uw, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true)
+DO_VLDR_SG(vldrw_sg_wb_uw, MO_TEUL, uint32_t, ldl, 4, uint32_t, uint32_t, ADDR_ADD, true)
DO_VLDR64_SG(vldrd_sg_wb_ud, ADDR_ADD, true)
-DO_VSTR_SG(vstrw_sg_wb_uw, stl, 4, uint32_t, ADDR_ADD, true)
+DO_VSTR_SG(vstrw_sg_wb_uw, MO_TEUL, stl, 4, uint32_t, ADDR_ADD, true)
DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true)
/*
@@ -388,13 +403,15 @@ DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true)
uint16_t mask = mve_eci_mask(env); \
static const uint8_t off[4] = { O1, O2, O3, O4 }; \
uint32_t addr, data; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (beat = 0; beat < 4; beat++, mask >>= 4) { \
if ((mask & 1) == 0) { \
/* ECI says skip this beat */ \
continue; \
} \
addr = base + off[beat] * 4; \
- data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
+ data = cpu_ldl_mmu(env, addr, oi, GETPC()); \
for (e = 0; e < 4; e++, data >>= 8) { \
uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
qd[H1(off[beat])] = data; \
@@ -412,13 +429,15 @@ DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true)
uint32_t addr, data; \
int y; /* y counts 0 2 0 2 */ \
uint16_t *qd; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \
if ((mask & 1) == 0) { \
/* ECI says skip this beat */ \
continue; \
} \
addr = base + off[beat] * 8 + (beat & 1) * 4; \
- data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
+ data = cpu_ldl_mmu(env, addr, oi, GETPC()); \
qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y); \
qd[H2(off[beat])] = data; \
data >>= 16; \
@@ -437,13 +456,15 @@ DO_VSTR64_SG(vstrd_sg_wb_ud, ADDR_ADD, true)
uint32_t addr, data; \
uint32_t *qd; \
int y; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (beat = 0; beat < 4; beat++, mask >>= 4) { \
if ((mask & 1) == 0) { \
/* ECI says skip this beat */ \
continue; \
} \
addr = base + off[beat] * 4; \
- data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
+ data = cpu_ldl_mmu(env, addr, oi, GETPC()); \
y = (beat + (O1 & 2)) & 3; \
qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \
qd[H4(off[beat] >> 2)] = data; \
@@ -474,13 +495,15 @@ DO_VLD4W(vld43w, 6, 7, 8, 9)
static const uint8_t off[4] = { O1, O2, O3, O4 }; \
uint32_t addr, data; \
uint8_t *qd; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (beat = 0; beat < 4; beat++, mask >>= 4) { \
if ((mask & 1) == 0) { \
/* ECI says skip this beat */ \
continue; \
} \
addr = base + off[beat] * 2; \
- data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
+ data = cpu_ldl_mmu(env, addr, oi, GETPC()); \
for (e = 0; e < 4; e++, data >>= 8) { \
qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \
qd[H1(off[beat] + (e >> 1))] = data; \
@@ -498,13 +521,15 @@ DO_VLD4W(vld43w, 6, 7, 8, 9)
uint32_t addr, data; \
int e; \
uint16_t *qd; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (beat = 0; beat < 4; beat++, mask >>= 4) { \
if ((mask & 1) == 0) { \
/* ECI says skip this beat */ \
continue; \
} \
addr = base + off[beat] * 4; \
- data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
+ data = cpu_ldl_mmu(env, addr, oi, GETPC()); \
for (e = 0; e < 2; e++, data >>= 16) { \
qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \
qd[H2(off[beat])] = data; \
@@ -521,13 +546,15 @@ DO_VLD4W(vld43w, 6, 7, 8, 9)
static const uint8_t off[4] = { O1, O2, O3, O4 }; \
uint32_t addr, data; \
uint32_t *qd; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (beat = 0; beat < 4; beat++, mask >>= 4) { \
if ((mask & 1) == 0) { \
/* ECI says skip this beat */ \
continue; \
} \
addr = base + off[beat]; \
- data = cpu_ldl_le_data_ra(env, addr, GETPC()); \
+ data = cpu_ldl_mmu(env, addr, oi, GETPC()); \
qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \
qd[H4(off[beat] >> 3)] = data; \
} \
@@ -550,6 +577,8 @@ DO_VLD2W(vld21w, 8, 12, 16, 20)
uint16_t mask = mve_eci_mask(env); \
static const uint8_t off[4] = { O1, O2, O3, O4 }; \
uint32_t addr, data; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (beat = 0; beat < 4; beat++, mask >>= 4) { \
if ((mask & 1) == 0) { \
/* ECI says skip this beat */ \
@@ -561,7 +590,7 @@ DO_VLD2W(vld21w, 8, 12, 16, 20)
uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e); \
data = (data << 8) | qd[H1(off[beat])]; \
} \
- cpu_stl_le_data_ra(env, addr, data, GETPC()); \
+ cpu_stl_mmu(env, addr, data, oi, GETPC()); \
} \
}
@@ -575,6 +604,8 @@ DO_VLD2W(vld21w, 8, 12, 16, 20)
uint32_t addr, data; \
int y; /* y counts 0 2 0 2 */ \
uint16_t *qd; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (beat = 0, y = 0; beat < 4; beat++, mask >>= 4, y ^= 2) { \
if ((mask & 1) == 0) { \
/* ECI says skip this beat */ \
@@ -585,7 +616,7 @@ DO_VLD2W(vld21w, 8, 12, 16, 20)
data = qd[H2(off[beat])]; \
qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + y + 1); \
data |= qd[H2(off[beat])] << 16; \
- cpu_stl_le_data_ra(env, addr, data, GETPC()); \
+ cpu_stl_mmu(env, addr, data, oi, GETPC()); \
} \
}
@@ -599,6 +630,8 @@ DO_VLD2W(vld21w, 8, 12, 16, 20)
uint32_t addr, data; \
uint32_t *qd; \
int y; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (beat = 0; beat < 4; beat++, mask >>= 4) { \
if ((mask & 1) == 0) { \
/* ECI says skip this beat */ \
@@ -608,7 +641,7 @@ DO_VLD2W(vld21w, 8, 12, 16, 20)
y = (beat + (O1 & 2)) & 3; \
qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + y); \
data = qd[H4(off[beat] >> 2)]; \
- cpu_stl_le_data_ra(env, addr, data, GETPC()); \
+ cpu_stl_mmu(env, addr, data, oi, GETPC()); \
} \
}
@@ -636,6 +669,8 @@ DO_VST4W(vst43w, 6, 7, 8, 9)
static const uint8_t off[4] = { O1, O2, O3, O4 }; \
uint32_t addr, data; \
uint8_t *qd; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (beat = 0; beat < 4; beat++, mask >>= 4) { \
if ((mask & 1) == 0) { \
/* ECI says skip this beat */ \
@@ -647,7 +682,7 @@ DO_VST4W(vst43w, 6, 7, 8, 9)
qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + (e & 1)); \
data = (data << 8) | qd[H1(off[beat] + (e >> 1))]; \
} \
- cpu_stl_le_data_ra(env, addr, data, GETPC()); \
+ cpu_stl_mmu(env, addr, data, oi, GETPC()); \
} \
}
@@ -661,6 +696,8 @@ DO_VST4W(vst43w, 6, 7, 8, 9)
uint32_t addr, data; \
int e; \
uint16_t *qd; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (beat = 0; beat < 4; beat++, mask >>= 4) { \
if ((mask & 1) == 0) { \
/* ECI says skip this beat */ \
@@ -672,7 +709,7 @@ DO_VST4W(vst43w, 6, 7, 8, 9)
qd = (uint16_t *)aa32_vfp_qreg(env, qnidx + e); \
data = (data << 16) | qd[H2(off[beat])]; \
} \
- cpu_stl_le_data_ra(env, addr, data, GETPC()); \
+ cpu_stl_mmu(env, addr, data, oi, GETPC()); \
} \
}
@@ -685,6 +722,8 @@ DO_VST4W(vst43w, 6, 7, 8, 9)
static const uint8_t off[4] = { O1, O2, O3, O4 }; \
uint32_t addr, data; \
uint32_t *qd; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
for (beat = 0; beat < 4; beat++, mask >>= 4) { \
if ((mask & 1) == 0) { \
/* ECI says skip this beat */ \
@@ -693,7 +732,7 @@ DO_VST4W(vst43w, 6, 7, 8, 9)
addr = base + off[beat]; \
qd = (uint32_t *)aa32_vfp_qreg(env, qnidx + (beat & 1)); \
data = qd[H4(off[beat] >> 3)]; \
- cpu_stl_le_data_ra(env, addr, data, GETPC()); \
+ cpu_stl_mmu(env, addr, data, oi, GETPC()); \
} \
}
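One detail shared by every VLDn/VSTn hunk above: the MemOp is always MO_TEUL, even for the byte and halfword forms, because the unit of memory access is the 32-bit beat; the element size only decides how each loaded word is scattered across (or gathered from) the Q registers. Pulling the vld4b inner step out of the macro, with comments added:

data = cpu_ldl_mmu(env, addr, oi, GETPC());      /* one aligned 4-byte beat */
for (e = 0; e < 4; e++, data >>= 8) {
    uint8_t *qd = (uint8_t *)aa32_vfp_qreg(env, qnidx + e);
    qd[H1(off[beat])] = data;                    /* one byte per element reg */
}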
@@ -2165,27 +2204,6 @@ DO_VSHLL_ALL(vshllt, true)
DO_VSHRN(OP##tb, true, 1, uint8_t, 2, uint16_t, FN) \
DO_VSHRN(OP##th, true, 2, uint16_t, 4, uint32_t, FN)
-static inline uint64_t do_urshr(uint64_t x, unsigned sh)
-{
- if (likely(sh < 64)) {
- return (x >> sh) + ((x >> (sh - 1)) & 1);
- } else if (sh == 64) {
- return x >> 63;
- } else {
- return 0;
- }
-}
-
-static inline int64_t do_srshr(int64_t x, unsigned sh)
-{
- if (likely(sh < 64)) {
- return (x >> sh) + ((x >> (sh - 1)) & 1);
- } else {
- /* Rounding the sign bit always produces 0. */
- return 0;
- }
-}
-
DO_VSHRN_ALL(vshrn, DO_SHR)
DO_VSHRN_ALL(vrshrn, do_urshr)
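Note that do_urshr() and do_srshr() are deleted above yet still referenced by DO_VSHRN_ALL() immediately afterwards, so they have evidently moved to a shared internal header rather than gone away. For reference, their rounding behaviour adds back the last bit shifted out:

/* Worked examples of the unsigned rounding right shift:
 *   do_urshr(7, 1) = (7 >> 1) + ((7 >> 0) & 1) = 3 + 1 = 4
 *   do_urshr(6, 1) = (6 >> 1) + ((6 >> 0) & 1) = 3 + 0 = 3
 * i.e. 7/2 rounds up to 4 while 6/2 stays 3.
 */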
diff --git a/target/arm/tcg/neon_helper.c b/target/arm/tcg/neon_helper.c
index e2cc7cf..8d288f3 100644
--- a/target/arm/tcg/neon_helper.c
+++ b/target/arm/tcg/neon_helper.c
@@ -9,11 +9,13 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"
#include "vec_internal.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
@@ -227,15 +229,30 @@ NEON_GVEC_VOP2(gvec_srshl_h, int16_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, src2, 16, true, NULL))
+NEON_GVEC_VOP2(sme2_srshl_h, int16_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
(dest = do_sqrshl_bhs(src1, (int8_t)src2, 32, true, NULL))
NEON_GVEC_VOP2(gvec_srshl_s, int32_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_bhs(src1, src2, 32, true, NULL))
+NEON_GVEC_VOP2(sme2_srshl_s, int32_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
(dest = do_sqrshl_d(src1, (int8_t)src2, true, NULL))
NEON_GVEC_VOP2(gvec_srshl_d, int64_t)
#undef NEON_FN
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_sqrshl_d(src1, src2, true, NULL))
+NEON_GVEC_VOP2(sme2_srshl_d, int64_t)
+#undef NEON_FN
+
uint32_t HELPER(neon_rshl_s32)(uint32_t val, uint32_t shift)
{
return do_sqrshl_bhs(val, (int8_t)shift, 32, true, NULL);
@@ -259,15 +276,30 @@ NEON_GVEC_VOP2(gvec_urshl_h, uint16_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, (int16_t)src2, 16, true, NULL))
+NEON_GVEC_VOP2(sme2_urshl_h, uint16_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
(dest = do_uqrshl_bhs(src1, (int8_t)src2, 32, true, NULL))
NEON_GVEC_VOP2(gvec_urshl_s, int32_t)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_bhs(src1, src2, 32, true, NULL))
+NEON_GVEC_VOP2(sme2_urshl_s, int32_t)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
(dest = do_uqrshl_d(src1, (int8_t)src2, true, NULL))
NEON_GVEC_VOP2(gvec_urshl_d, int64_t)
#undef NEON_FN
+#define NEON_FN(dest, src1, src2) \
+ (dest = do_uqrshl_d(src1, src2, true, NULL))
+NEON_GVEC_VOP2(sme2_urshl_d, int64_t)
+#undef NEON_FN
+
uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shift)
{
return do_uqrshl_bhs(val, (int8_t)shift, 32, true, NULL);
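The new sme2_* entries differ from the neighbouring gvec_* ones only in how much of the shift operand is significant: the gvec_ helpers truncate it to a signed byte, as the Advanced SIMD element layout requires, while the SME2 forms take the shift at (or sign-extended to) the element width. Side by side, from the definitions above:

/* gvec form: only the low byte of src2 is a shift count */
dest = do_sqrshl_bhs(src1, (int8_t)src2, 32, true, NULL);

/* sme2 form: the whole element value is the shift count */
dest = do_sqrshl_bhs(src1, src2, 32, true, NULL);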
diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c
index 30786fd..575e566 100644
--- a/target/arm/tcg/op_helper.c
+++ b/target/arm/tcg/op_helper.c
@@ -20,10 +20,11 @@
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
+#include "exec/target_page.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/probe.h"
#include "cpregs.h"
#define SIGNBIT (uint32_t)0x80000000
@@ -1221,7 +1222,7 @@ uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
}
}
-void HELPER(probe_access)(CPUARMState *env, target_ulong ptr,
+void HELPER(probe_access)(CPUARMState *env, vaddr ptr,
uint32_t access_type, uint32_t mmu_idx,
uint32_t size)
{
diff --git a/target/arm/tcg/pauth_helper.c b/target/arm/tcg/pauth_helper.c
index c4b1430..c591c30 100644
--- a/target/arm/tcg/pauth_helper.c
+++ b/target/arm/tcg/pauth_helper.c
@@ -21,8 +21,7 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/exec-all.h"
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "qemu/xxhash.h"
diff --git a/target/arm/tcg/sme.decode b/target/arm/tcg/sme.decode
index 628804e..6bb9aa2 100644
--- a/target/arm/tcg/sme.decode
+++ b/target/arm/tcg/sme.decode
@@ -22,30 +22,139 @@
### SME Misc
ZERO 11000000 00 001 00000000000 imm:8
+ZERO_zt0 11000000 01 001 00000000000 00000001
### SME Move into/from Array
%mova_rs 13:2 !function=plus_12
-&mova esz rs pg zr za_imm v:bool to_vec:bool
+%mova_rv 13:2 !function=plus_8
+&mova_a rv zr off
+&mova_p esz rs pg zr za off v:bool
+&mova_t esz rs zr za off v:bool
-MOVA 11000000 esz:2 00000 0 v:1 .. pg:3 zr:5 0 za_imm:4 \
- &mova to_vec=0 rs=%mova_rs
-MOVA 11000000 11 00000 1 v:1 .. pg:3 zr:5 0 za_imm:4 \
- &mova to_vec=0 rs=%mova_rs esz=4
+MOVA_tz 11000000 00 00000 0 v:1 .. pg:3 zr:5 0 off:4 \
+ &mova_p rs=%mova_rs esz=0 za=0
+MOVA_tz 11000000 01 00000 0 v:1 .. pg:3 zr:5 0 za:1 off:3 \
+ &mova_p rs=%mova_rs esz=1
+MOVA_tz 11000000 10 00000 0 v:1 .. pg:3 zr:5 0 za:2 off:2 \
+ &mova_p rs=%mova_rs esz=2
+MOVA_tz 11000000 11 00000 0 v:1 .. pg:3 zr:5 0 za:3 off:1 \
+ &mova_p rs=%mova_rs esz=3
+MOVA_tz 11000000 11 00000 1 v:1 .. pg:3 zr:5 0 za:4 \
+ &mova_p rs=%mova_rs esz=4 off=0
-MOVA 11000000 esz:2 00001 0 v:1 .. pg:3 0 za_imm:4 zr:5 \
- &mova to_vec=1 rs=%mova_rs
-MOVA 11000000 11 00001 1 v:1 .. pg:3 0 za_imm:4 zr:5 \
- &mova to_vec=1 rs=%mova_rs esz=4
+MOVA_zt 11000000 00 00001 0 v:1 .. pg:3 0 off:4 zr:5 \
+ &mova_p rs=%mova_rs esz=0 za=0
+MOVA_zt 11000000 01 00001 0 v:1 .. pg:3 0 za:1 off:3 zr:5 \
+ &mova_p rs=%mova_rs esz=1
+MOVA_zt 11000000 10 00001 0 v:1 .. pg:3 0 za:2 off:2 zr:5 \
+ &mova_p rs=%mova_rs esz=2
+MOVA_zt 11000000 11 00001 0 v:1 .. pg:3 0 za:3 off:1 zr:5 \
+ &mova_p rs=%mova_rs esz=3
+MOVA_zt 11000000 11 00001 1 v:1 .. pg:3 0 za:4 zr:5 \
+ &mova_p rs=%mova_rs esz=4 off=0
+
+MOVA_tz2 11000000 00 00010 0 v:1 .. 000 zr:4 0 00 off:3 \
+ &mova_t rs=%mova_rs esz=0 za=0
+MOVA_tz2 11000000 01 00010 0 v:1 .. 000 zr:4 0 00 za:1 off:2 \
+ &mova_t rs=%mova_rs esz=1
+MOVA_tz2 11000000 10 00010 0 v:1 .. 000 zr:4 0 00 za:2 off:1 \
+ &mova_t rs=%mova_rs esz=2
+MOVA_tz2 11000000 11 00010 0 v:1 .. 000 zr:4 0 00 za:3 \
+ &mova_t rs=%mova_rs esz=3 off=0
+
+MOVA_zt2 11000000 00 00011 0 v:1 .. 000 00 off:3 zr:4 0 \
+ &mova_t rs=%mova_rs esz=0 za=0
+MOVA_zt2 11000000 01 00011 0 v:1 .. 000 00 za:1 off:2 zr:4 0 \
+ &mova_t rs=%mova_rs esz=1
+MOVA_zt2 11000000 10 00011 0 v:1 .. 000 00 za:2 off:1 zr:4 0 \
+ &mova_t rs=%mova_rs esz=2
+MOVA_zt2 11000000 11 00011 0 v:1 .. 000 00 za:3 zr:4 0 \
+ &mova_t rs=%mova_rs esz=3 off=0
+
+MOVA_tz4 11000000 00 00010 0 v:1 .. 001 zr:3 00 000 off:2 \
+ &mova_t rs=%mova_rs esz=0 za=0
+MOVA_tz4 11000000 01 00010 0 v:1 .. 001 zr:3 00 000 za:1 off:1 \
+ &mova_t rs=%mova_rs esz=1
+MOVA_tz4 11000000 10 00010 0 v:1 .. 001 zr:3 00 000 za:2 \
+ &mova_t rs=%mova_rs esz=2 off=0
+MOVA_tz4 11000000 11 00010 0 v:1 .. 001 zr:3 00 00 za:3 \
+ &mova_t rs=%mova_rs esz=3 off=0
+
+MOVA_zt4 11000000 00 00011 0 v:1 .. 001 000 off:2 zr:3 00 \
+ &mova_t rs=%mova_rs esz=0 za=0
+MOVA_zt4 11000000 01 00011 0 v:1 .. 001 000 za:1 off:1 zr:3 00 \
+ &mova_t rs=%mova_rs esz=1
+MOVA_zt4 11000000 10 00011 0 v:1 .. 001 000 za:2 zr:3 00 \
+ &mova_t rs=%mova_rs esz=2 off=0
+MOVA_zt4 11000000 11 00011 0 v:1 .. 001 00 za:3 zr:3 00 \
+ &mova_t rs=%mova_rs esz=3 off=0
+
+MOVA_az2 11000000 00 00010 00 .. 010 zr:4 000 off:3 \
+ &mova_a rv=%mova_rv
+MOVA_az4 11000000 00 00010 00 .. 011 zr:3 0000 off:3 \
+ &mova_a rv=%mova_rv
+
+MOVA_za2 11000000 00 00011 00 .. 010 00 off:3 zr:4 0 \
+ &mova_a rv=%mova_rv
+MOVA_za4 11000000 00 00011 00 .. 011 00 off:3 zr:3 00 \
+ &mova_a rv=%mova_rv
+
+### SME Move and Zero
+
+MOVAZ_za2 11000000 00000110 0 .. 01010 off:3 zr:4 0 \
+ &mova_a rv=%mova_rv
+MOVAZ_za4 11000000 00000110 0 .. 01110 off:3 zr:3 00 \
+ &mova_a rv=%mova_rv
+
+MOVAZ_zt 11000000 00 00001 0 v:1 .. 0001 off:4 zr:5 \
+ &mova_t rs=%mova_rs esz=0 za=0
+MOVAZ_zt 11000000 01 00001 0 v:1 .. 0001 za:1 off:3 zr:5 \
+ &mova_t rs=%mova_rs esz=1
+MOVAZ_zt 11000000 10 00001 0 v:1 .. 0001 za:2 off:2 zr:5 \
+ &mova_t rs=%mova_rs esz=2
+MOVAZ_zt 11000000 11 00001 0 v:1 .. 0001 za:3 off:1 zr:5 \
+ &mova_t rs=%mova_rs esz=3
+MOVAZ_zt 11000000 11 00001 1 v:1 .. 0001 za:4 zr:5 \
+ &mova_t rs=%mova_rs esz=4 off=0
+
+MOVAZ_zt2 11000000 00 00011 0 v:1 .. 00010 off:3 zr:4 0 \
+ &mova_t rs=%mova_rs esz=0 za=0
+MOVAZ_zt2 11000000 01 00011 0 v:1 .. 00010 za:1 off:2 zr:4 0 \
+ &mova_t rs=%mova_rs esz=1
+MOVAZ_zt2 11000000 10 00011 0 v:1 .. 00010 za:2 off:1 zr:4 0 \
+ &mova_t rs=%mova_rs esz=2
+MOVAZ_zt2 11000000 11 00011 0 v:1 .. 00010 za:3 zr:4 0 \
+ &mova_t rs=%mova_rs esz=3 off=0
+
+MOVAZ_zt4 11000000 00 00011 0 v:1 .. 001100 off:2 zr:3 00 \
+ &mova_t rs=%mova_rs esz=0 za=0
+MOVAZ_zt4 11000000 01 00011 0 v:1 .. 001100 za:1 off:1 zr:3 00 \
+ &mova_t rs=%mova_rs esz=1
+MOVAZ_zt4 11000000 10 00011 0 v:1 .. 001100 za:2 zr:3 00 \
+ &mova_t rs=%mova_rs esz=2 off=0
+MOVAZ_zt4 11000000 11 00011 0 v:1 .. 00110 za:3 zr:3 00 \
+ &mova_t rs=%mova_rs esz=3 off=0
+
+### SME Move into/from ZT0
+
+MOVT_rzt 1100 0000 0100 1100 0 off:3 00 11111 rt:5
+MOVT_ztr 1100 0000 0100 1110 0 off:3 00 11111 rt:5
### SME Memory
-&ldst esz rs pg rn rm za_imm v:bool st:bool
+&ldst esz rs pg rn rm za off v:bool st:bool
-LDST1 1110000 0 esz:2 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
- &ldst rs=%mova_rs
-LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
- &ldst esz=4 rs=%mova_rs
+LDST1 1110000 0 00 st:1 rm:5 v:1 .. pg:3 rn:5 0 off:4 \
+ &ldst rs=%mova_rs esz=0 za=0
+LDST1 1110000 0 01 st:1 rm:5 v:1 .. pg:3 rn:5 0 za:1 off:3 \
+ &ldst rs=%mova_rs esz=1
+LDST1 1110000 0 10 st:1 rm:5 v:1 .. pg:3 rn:5 0 za:2 off:2 \
+ &ldst rs=%mova_rs esz=2
+LDST1 1110000 0 11 st:1 rm:5 v:1 .. pg:3 rn:5 0 za:3 off:1 \
+ &ldst rs=%mova_rs esz=3
+LDST1 1110000 1 11 st:1 rm:5 v:1 .. pg:3 rn:5 0 za:4 \
+ &ldst rs=%mova_rs esz=4 off=0
&ldstr rv rn imm
@ldstr ....... ... . ...... .. ... rn:5 . imm:4 \
       &ldstr rv=%mova_rs
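For readers not fluent in decodetree: a field definition like %mova_rs 13:2 !function=plus_12 extracts a 2-bit field at bit position 13 and feeds it through the named function. Assuming plus_12 and plus_8 simply add their constant (selecting the slice-index register from R12..R15 and R8..R11 respectively), the generated extraction is equivalent to:

/* Sketch of the extraction decodetree generates for these fields. */
static int sketch_mova_rs(uint32_t insn)
{
    return extract32(insn, 13, 2) + 12;  /* %mova_rs 13:2 !function=plus_12 */
}

static int sketch_mova_rv(uint32_t insn)
{
    return extract32(insn, 13, 2) + 8;   /* %mova_rv 13:2 !function=plus_8 */
}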
@@ -54,6 +163,12 @@ LDST1 1110000 111 st:1 rm:5 v:1 .. pg:3 rn:5 0 za_imm:4 \
LDR 1110000 100 0 000000 .. 000 ..... 0 .... @ldstr
STR 1110000 100 1 000000 .. 000 ..... 0 .... @ldstr
+&ldstzt0 rn
+@ldstzt0 ....... ... . ...... .. ... rn:5 ..... &ldstzt0
+
+LDR_zt0 1110000 100 0 111111 00 000 ..... 00000 @ldstzt0
+STR_zt0 1110000 100 1 111111 00 000 ..... 00000 @ldstzt0
+
### SME Add Vector to Array
&adda zad zn pm pn
@@ -68,14 +183,18 @@ ADDVA_d 11000000 11 01000 1 ... ... ..... 00 ... @adda_64
### SME Outer Product
&op zad zn zm pm pn sub:bool
+@op_16 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 ... zad:1 &op
@op_32 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 .. zad:2 &op
@op_64 ........ ... zm:5 pm:3 pn:3 zn:5 sub:1 . zad:3 &op
+FMOPA_h 10000001 100 ..... ... ... ..... . 100 . @op_16
FMOPA_s 10000000 100 ..... ... ... ..... . 00 .. @op_32
FMOPA_d 10000000 110 ..... ... ... ..... . 0 ... @op_64
-BFMOPA 10000001 100 ..... ... ... ..... . 00 .. @op_32
-FMOPA_h 10000001 101 ..... ... ... ..... . 00 .. @op_32
+BFMOPA 10000001 101 ..... ... ... ..... . 100 . @op_16
+
+BFMOPA_w 10000001 100 ..... ... ... ..... . 00 .. @op_32
+FMOPA_w_h 10000001 101 ..... ... ... ..... . 00 .. @op_32
SMOPA_s 1010000 0 10 0 ..... ... ... ..... . 00 .. @op_32
SUMOPA_s 1010000 0 10 1 ..... ... ... ..... . 00 .. @op_32
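Two things happen to the outer-product patterns here: the existing widening fp16/bf16-to-fp32 forms are renamed with a _w marker (BFMOPA_w, FMOPA_w_h) but keep their @op_32 encodings bit for bit, and new non-widening fp16/bf16 forms take over the plain names on the new @op_16 format. The tile field width in each format tracks the architecture's tile count:

/* ZA holds one tile per byte of element size, so the zad field needs
 * log2(element bytes) bits:
 *
 *   @op_16  2-byte elements  ->  2 tiles  ->  zad:1
 *   @op_32  4-byte elements  ->  4 tiles  ->  zad:2
 *   @op_64  8-byte elements  ->  8 tiles  ->  zad:3
 */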
@@ -86,3 +205,789 @@ SMOPA_d 1010000 0 11 0 ..... ... ... ..... . 0 ... @op_64
SUMOPA_d 1010000 0 11 1 ..... ... ... ..... . 0 ... @op_64
USMOPA_d 1010000 1 11 0 ..... ... ... ..... . 0 ... @op_64
UMOPA_d 1010000 1 11 1 ..... ... ... ..... . 0 ... @op_64
+
+BMOPA 1000000 0 10 0 ..... ... ... ..... . 10 .. @op_32
+SMOPA2_s 1010000 0 10 0 ..... ... ... ..... . 10 .. @op_32
+UMOPA2_s 1010000 1 10 0 ..... ... ... ..... . 10 .. @op_32
+
+### SME2 Multi-vector Multiple and Single SVE Destructive
+
+%zd_ax2 1:4 !function=times_2
+%zd_ax4 2:3 !function=times_4
+
+&z2z_en zdn zm esz n
+@z2z_2x1 ....... . esz:2 .. zm:4 ....0. ..... .... . \
+ &z2z_en n=2 zdn=%zd_ax2
+@z2z_4x1 ....... . esz:2 .. zm:4 ....1. ..... ...0 . \
+ &z2z_en n=4 zdn=%zd_ax4
+
+SMAX_n1 1100000 1 .. 10 .... 1010.0 00000 .... 0 @z2z_2x1
+SMAX_n1 1100000 1 .. 10 .... 1010.0 00000 .... 0 @z2z_4x1
+UMAX_n1 1100000 1 .. 10 .... 1010.0 00000 .... 1 @z2z_2x1
+UMAX_n1 1100000 1 .. 10 .... 1010.0 00000 .... 1 @z2z_4x1
+SMIN_n1 1100000 1 .. 10 .... 1010.0 00001 .... 0 @z2z_2x1
+SMIN_n1 1100000 1 .. 10 .... 1010.0 00001 .... 0 @z2z_4x1
+UMIN_n1 1100000 1 .. 10 .... 1010.0 00001 .... 1 @z2z_2x1
+UMIN_n1 1100000 1 .. 10 .... 1010.0 00001 .... 1 @z2z_4x1
+
+FMAX_n1 1100000 1 .. 10 .... 1010.0 01000 .... 0 @z2z_2x1
+FMAX_n1 1100000 1 .. 10 .... 1010.0 01000 .... 0 @z2z_4x1
+FMIN_n1 1100000 1 .. 10 .... 1010.0 01000 .... 1 @z2z_2x1
+FMIN_n1 1100000 1 .. 10 .... 1010.0 01000 .... 1 @z2z_4x1
+FMAXNM_n1 1100000 1 .. 10 .... 1010.0 01001 .... 0 @z2z_2x1
+FMAXNM_n1 1100000 1 .. 10 .... 1010.0 01001 .... 0 @z2z_4x1
+FMINNM_n1 1100000 1 .. 10 .... 1010.0 01001 .... 1 @z2z_2x1
+FMINNM_n1 1100000 1 .. 10 .... 1010.0 01001 .... 1 @z2z_4x1
+
+SRSHL_n1 1100000 1 .. 10 .... 1010.0 10001 .... 0 @z2z_2x1
+SRSHL_n1 1100000 1 .. 10 .... 1010.0 10001 .... 0 @z2z_4x1
+URSHL_n1 1100000 1 .. 10 .... 1010.0 10001 .... 1 @z2z_2x1
+URSHL_n1 1100000 1 .. 10 .... 1010.0 10001 .... 1 @z2z_4x1
+
+ADD_n1 1100000 1 .. 10 .... 1010.0 11000 .... 0 @z2z_2x1
+ADD_n1 1100000 1 .. 10 .... 1010.0 11000 .... 0 @z2z_4x1
+
+SQDMULH_n1 1100000 1 .. 10 .... 1010.1 00000 .... 0 @z2z_2x1
+SQDMULH_n1 1100000 1 .. 10 .... 1010.1 00000 .... 0 @z2z_4x1
+
+### SME2 Multi-vector Multiple Vectors SVE Destructive
+
+%zm_ax2 17:4 !function=times_2
+%zm_ax4 18:3 !function=times_4
+
+@z2z_2x2 ....... . esz:2 . ....0 ....0. ..... .... . \
+ &z2z_en n=2 zdn=%zd_ax2 zm=%zm_ax2
+@z2z_4x4 ....... . esz:2 . ...00 ....1. ..... ...0 . \
+ &z2z_en n=4 zdn=%zd_ax4 zm=%zm_ax4
+
+SMAX_nn 1100000 1 .. 1 ..... 1011.0 00000 .... 0 @z2z_2x2
+SMAX_nn 1100000 1 .. 1 ..... 1011.0 00000 .... 0 @z2z_4x4
+UMAX_nn 1100000 1 .. 1 ..... 1011.0 00000 .... 1 @z2z_2x2
+UMAX_nn 1100000 1 .. 1 ..... 1011.0 00000 .... 1 @z2z_4x4
+SMIN_nn 1100000 1 .. 1 ..... 1011.0 00001 .... 0 @z2z_2x2
+SMIN_nn 1100000 1 .. 1 ..... 1011.0 00001 .... 0 @z2z_4x4
+UMIN_nn 1100000 1 .. 1 ..... 1011.0 00001 .... 1 @z2z_2x2
+UMIN_nn 1100000 1 .. 1 ..... 1011.0 00001 .... 1 @z2z_4x4
+
+FMAX_nn 1100000 1 .. 1 ..... 1011.0 01000 .... 0 @z2z_2x2
+FMAX_nn 1100000 1 .. 1 ..... 1011.0 01000 .... 0 @z2z_4x4
+FMIN_nn 1100000 1 .. 1 ..... 1011.0 01000 .... 1 @z2z_2x2
+FMIN_nn 1100000 1 .. 1 ..... 1011.0 01000 .... 1 @z2z_4x4
+FMAXNM_nn 1100000 1 .. 1 ..... 1011.0 01001 .... 0 @z2z_2x2
+FMAXNM_nn 1100000 1 .. 1 ..... 1011.0 01001 .... 0 @z2z_4x4
+FMINNM_nn 1100000 1 .. 1 ..... 1011.0 01001 .... 1 @z2z_2x2
+FMINNM_nn 1100000 1 .. 1 ..... 1011.0 01001 .... 1 @z2z_4x4
+
+SRSHL_nn 1100000 1 .. 1 ..... 1011.0 10001 .... 0 @z2z_2x2
+SRSHL_nn 1100000 1 .. 1 ..... 1011.0 10001 .... 0 @z2z_4x4
+URSHL_nn 1100000 1 .. 1 ..... 1011.0 10001 .... 1 @z2z_2x2
+URSHL_nn 1100000 1 .. 1 ..... 1011.0 10001 .... 1 @z2z_4x4
+
+SQDMULH_nn 1100000 1 .. 1 ..... 1011.1 00000 .... 0 @z2z_2x2
+SQDMULH_nn 1100000 1 .. 1 ..... 1011.1 00000 .... 0 @z2z_4x4
+
+### SME2 Multi-vector Multiple and Single Array Vectors
+
+&azz_n n off rv zn zm
+@azz_nx1_o3 ........ .... zm:4 ...... zn:5 .. off:3 &azz_n rv=%mova_rv
+
+ADD_azz_n1_s 11000001 0010 .... 0 .. 110 ..... 10 ... @azz_nx1_o3 n=2
+ADD_azz_n1_s 11000001 0011 .... 0 .. 110 ..... 10 ... @azz_nx1_o3 n=4
+ADD_azz_n1_d 11000001 0110 .... 0 .. 110 ..... 10 ... @azz_nx1_o3 n=2
+ADD_azz_n1_d 11000001 0111 .... 0 .. 110 ..... 10 ... @azz_nx1_o3 n=4
+
+SUB_azz_n1_s 11000001 0010 .... 0 .. 110 ..... 11 ... @azz_nx1_o3 n=2
+SUB_azz_n1_s 11000001 0011 .... 0 .. 110 ..... 11 ... @azz_nx1_o3 n=4
+SUB_azz_n1_d 11000001 0110 .... 0 .. 110 ..... 11 ... @azz_nx1_o3 n=2
+SUB_azz_n1_d 11000001 0111 .... 0 .. 110 ..... 11 ... @azz_nx1_o3 n=4
+
+%off3_x2 0:3 !function=times_2
+%off2_x2 0:2 !function=times_2
+
+@azz_nx1_o3x2 ........ ... . zm:4 . .. ... zn:5 .. ... \
+ &azz_n off=%off3_x2 rv=%mova_rv
+@azz_nx1_o2x2 ........ ... . zm:4 . .. ... zn:5 ... .. \
+ &azz_n off=%off2_x2 rv=%mova_rv
+
+FMLAL_n1 11000001 001 0 .... 0 .. 011 ..... 00 ... @azz_nx1_o3x2 n=1
+FMLAL_n1 11000001 001 0 .... 0 .. 010 ..... 000 .. @azz_nx1_o2x2 n=2
+FMLAL_n1 11000001 001 1 .... 0 .. 010 ..... 000 .. @azz_nx1_o2x2 n=4
+
+FMLSL_n1 11000001 001 0 .... 0 .. 011 ..... 01 ... @azz_nx1_o3x2 n=1
+FMLSL_n1 11000001 001 0 .... 0 .. 010 ..... 010 .. @azz_nx1_o2x2 n=2
+FMLSL_n1 11000001 001 1 .... 0 .. 010 ..... 010 .. @azz_nx1_o2x2 n=4
+
+BFMLAL_n1 11000001 001 0 .... 0 .. 011 ..... 10 ... @azz_nx1_o3x2 n=1
+BFMLAL_n1 11000001 001 0 .... 0 .. 010 ..... 100 .. @azz_nx1_o2x2 n=2
+BFMLAL_n1 11000001 001 1 .... 0 .. 010 ..... 100 .. @azz_nx1_o2x2 n=4
+
+BFMLSL_n1 11000001 001 0 .... 0 .. 011 ..... 11 ... @azz_nx1_o3x2 n=1
+BFMLSL_n1 11000001 001 0 .... 0 .. 010 ..... 110 .. @azz_nx1_o2x2 n=2
+BFMLSL_n1 11000001 001 1 .... 0 .. 010 ..... 110 .. @azz_nx1_o2x2 n=4
+
+FDOT_n1 11000001 001 0 .... 0 .. 100 ..... 00 ... @azz_nx1_o3 n=2
+FDOT_n1 11000001 001 1 .... 0 .. 100 ..... 00 ... @azz_nx1_o3 n=4
+
+BFDOT_n1 11000001 001 0 .... 0 .. 100 ..... 10 ... @azz_nx1_o3 n=2
+BFDOT_n1 11000001 001 1 .... 0 .. 100 ..... 10 ... @azz_nx1_o3 n=4
+
+USDOT_n1 11000001 001 0 .... 0 .. 101 ..... 01 ... @azz_nx1_o3 n=2
+USDOT_n1 11000001 001 1 .... 0 .. 101 ..... 01 ... @azz_nx1_o3 n=4
+
+SUDOT_n1 11000001 001 0 .... 0 .. 101 ..... 11 ... @azz_nx1_o3 n=2
+SUDOT_n1 11000001 001 1 .... 0 .. 101 ..... 11 ... @azz_nx1_o3 n=4
+
+SDOT_n1_4b 11000001 001 0 .... 0 .. 101 ..... 00 ... @azz_nx1_o3 n=2
+SDOT_n1_4b 11000001 001 1 .... 0 .. 101 ..... 00 ... @azz_nx1_o3 n=4
+SDOT_n1_4h 11000001 011 0 .... 0 .. 101 ..... 00 ... @azz_nx1_o3 n=2
+SDOT_n1_4h 11000001 011 1 .... 0 .. 101 ..... 00 ... @azz_nx1_o3 n=4
+SDOT_n1_2h 11000001 011 0 .... 0 .. 101 ..... 01 ... @azz_nx1_o3 n=2
+SDOT_n1_2h 11000001 011 1 .... 0 .. 101 ..... 01 ... @azz_nx1_o3 n=4
+
+UDOT_n1_4b 11000001 001 0 .... 0 .. 101 ..... 10 ... @azz_nx1_o3 n=2
+UDOT_n1_4b 11000001 001 1 .... 0 .. 101 ..... 10 ... @azz_nx1_o3 n=4
+UDOT_n1_4h 11000001 011 0 .... 0 .. 101 ..... 10 ... @azz_nx1_o3 n=2
+UDOT_n1_4h 11000001 011 1 .... 0 .. 101 ..... 10 ... @azz_nx1_o3 n=4
+UDOT_n1_2h 11000001 011 0 .... 0 .. 101 ..... 11 ... @azz_nx1_o3 n=2
+UDOT_n1_2h 11000001 011 1 .... 0 .. 101 ..... 11 ... @azz_nx1_o3 n=4
+
+SMLAL_n1 11000001 011 0 .... 0 .. 011 ..... 00 ... @azz_nx1_o3x2 n=1
+SMLAL_n1 11000001 011 0 .... 0 .. 010 ..... 000 .. @azz_nx1_o2x2 n=2
+SMLAL_n1 11000001 011 1 .... 0 .. 010 ..... 000 .. @azz_nx1_o2x2 n=4
+
+SMLSL_n1 11000001 011 0 .... 0 .. 011 ..... 01 ... @azz_nx1_o3x2 n=1
+SMLSL_n1 11000001 011 0 .... 0 .. 010 ..... 010 .. @azz_nx1_o2x2 n=2
+SMLSL_n1 11000001 011 1 .... 0 .. 010 ..... 010 .. @azz_nx1_o2x2 n=4
+
+UMLAL_n1 11000001 011 0 .... 0 .. 011 ..... 10 ... @azz_nx1_o3x2 n=1
+UMLAL_n1 11000001 011 0 .... 0 .. 010 ..... 100 .. @azz_nx1_o2x2 n=2
+UMLAL_n1 11000001 011 1 .... 0 .. 010 ..... 100 .. @azz_nx1_o2x2 n=4
+
+UMLSL_n1 11000001 011 0 .... 0 .. 011 ..... 11 ... @azz_nx1_o3x2 n=1
+UMLSL_n1 11000001 011 0 .... 0 .. 010 ..... 110 .. @azz_nx1_o2x2 n=2
+UMLSL_n1 11000001 011 1 .... 0 .. 010 ..... 110 .. @azz_nx1_o2x2 n=4
+
+%off2_x4 0:2 !function=times_4
+%off1_x4 0:1 !function=times_4
+
+@azz_nx1_o2x4 ........ ... . zm:4 . .. ... zn:5 ... .. \
+ &azz_n off=%off2_x4 rv=%mova_rv
+@azz_nx1_o1x4 ........ ... . zm:4 . .. ... zn:5 .... . \
+ &azz_n off=%off1_x4 rv=%mova_rv
+
+SMLALL_n1_s 11000001 001 0 .... 0 .. 001 ..... 000 .. @azz_nx1_o2x4 n=1
+SMLALL_n1_d 11000001 011 0 .... 0 .. 001 ..... 000 .. @azz_nx1_o2x4 n=1
+SMLALL_n1_s 11000001 001 0 .... 0 .. 000 ..... 0000 . @azz_nx1_o1x4 n=2
+SMLALL_n1_d 11000001 011 0 .... 0 .. 000 ..... 0000 . @azz_nx1_o1x4 n=2
+SMLALL_n1_s 11000001 001 1 .... 0 .. 000 ..... 0000 . @azz_nx1_o1x4 n=4
+SMLALL_n1_d 11000001 011 1 .... 0 .. 000 ..... 0000 . @azz_nx1_o1x4 n=4
+
+SMLSLL_n1_s 11000001 001 0 .... 0 .. 001 ..... 010 .. @azz_nx1_o2x4 n=1
+SMLSLL_n1_d 11000001 011 0 .... 0 .. 001 ..... 010 .. @azz_nx1_o2x4 n=1
+SMLSLL_n1_s 11000001 001 0 .... 0 .. 000 ..... 0100 . @azz_nx1_o1x4 n=2
+SMLSLL_n1_d 11000001 011 0 .... 0 .. 000 ..... 0100 . @azz_nx1_o1x4 n=2
+SMLSLL_n1_s 11000001 001 1 .... 0 .. 000 ..... 0100 . @azz_nx1_o1x4 n=4
+SMLSLL_n1_d 11000001 011 1 .... 0 .. 000 ..... 0100 . @azz_nx1_o1x4 n=4
+
+UMLALL_n1_s 11000001 001 0 .... 0 .. 001 ..... 100 .. @azz_nx1_o2x4 n=1
+UMLALL_n1_d 11000001 011 0 .... 0 .. 001 ..... 100 .. @azz_nx1_o2x4 n=1
+UMLALL_n1_s 11000001 001 0 .... 0 .. 000 ..... 1000 . @azz_nx1_o1x4 n=2
+UMLALL_n1_d 11000001 011 0 .... 0 .. 000 ..... 1000 . @azz_nx1_o1x4 n=2
+UMLALL_n1_s 11000001 001 1 .... 0 .. 000 ..... 1000 . @azz_nx1_o1x4 n=4
+UMLALL_n1_d 11000001 011 1 .... 0 .. 000 ..... 1000 . @azz_nx1_o1x4 n=4
+
+UMLSLL_n1_s 11000001 001 0 .... 0 .. 001 ..... 110 .. @azz_nx1_o2x4 n=1
+UMLSLL_n1_d 11000001 011 0 .... 0 .. 001 ..... 110 .. @azz_nx1_o2x4 n=1
+UMLSLL_n1_s 11000001 001 0 .... 0 .. 000 ..... 1100 . @azz_nx1_o1x4 n=2
+UMLSLL_n1_d 11000001 011 0 .... 0 .. 000 ..... 1100 . @azz_nx1_o1x4 n=2
+UMLSLL_n1_s 11000001 001 1 .... 0 .. 000 ..... 1100 . @azz_nx1_o1x4 n=4
+UMLSLL_n1_d 11000001 011 1 .... 0 .. 000 ..... 1100 . @azz_nx1_o1x4 n=4
+
+USMLALL_n1_s 11000001 001 0 .... 0 .. 001 ..... 001 .. @azz_nx1_o2x4 n=1
+USMLALL_n1_s 11000001 001 0 .... 0 .. 000 ..... 0010 . @azz_nx1_o1x4 n=2
+USMLALL_n1_s 11000001 001 1 .... 0 .. 000 ..... 0010 . @azz_nx1_o1x4 n=4
+
+SUMLALL_n1_s 11000001 001 0 .... 0 .. 000 ..... 1010 . @azz_nx1_o1x4 n=2
+SUMLALL_n1_s 11000001 001 1 .... 0 .. 000 ..... 1010 . @azz_nx1_o1x4 n=4
+
+BFMLA_n1 11000001 011 0 .... 0 .. 111 ..... 00 ... @azz_nx1_o3 n=2
+FMLA_n1_h 11000001 001 0 .... 0 .. 111 ..... 00 ... @azz_nx1_o3 n=2
+FMLA_n1_s 11000001 001 0 .... 0 .. 110 ..... 00 ... @azz_nx1_o3 n=2
+FMLA_n1_d 11000001 011 0 .... 0 .. 110 ..... 00 ... @azz_nx1_o3 n=2
+
+BFMLA_n1 11000001 011 1 .... 0 .. 111 ..... 00 ... @azz_nx1_o3 n=4
+FMLA_n1_h 11000001 001 1 .... 0 .. 111 ..... 00 ... @azz_nx1_o3 n=4
+FMLA_n1_s 11000001 001 1 .... 0 .. 110 ..... 00 ... @azz_nx1_o3 n=4
+FMLA_n1_d 11000001 011 1 .... 0 .. 110 ..... 00 ... @azz_nx1_o3 n=4
+
+BFMLS_n1 11000001 011 0 .... 0 .. 111 ..... 01 ... @azz_nx1_o3 n=2
+FMLS_n1_h 11000001 001 0 .... 0 .. 111 ..... 01 ... @azz_nx1_o3 n=2
+FMLS_n1_s 11000001 001 0 .... 0 .. 110 ..... 01 ... @azz_nx1_o3 n=2
+FMLS_n1_d 11000001 011 0 .... 0 .. 110 ..... 01 ... @azz_nx1_o3 n=2
+
+BFMLS_n1 11000001 011 1 .... 0 .. 111 ..... 01 ... @azz_nx1_o3 n=4
+FMLS_n1_h 11000001 001 1 .... 0 .. 111 ..... 01 ... @azz_nx1_o3 n=4
+FMLS_n1_s 11000001 001 1 .... 0 .. 110 ..... 01 ... @azz_nx1_o3 n=4
+FMLS_n1_d 11000001 011 1 .... 0 .. 110 ..... 01 ... @azz_nx1_o3 n=4
+
+### SME2 Multi-vector Multiple Array Vectors
+
+%zn_ax2 6:4 !function=times_2
+%zn_ax4 7:3 !function=times_4
+
+@azz_2x2_o3 ........ ... ..... . .. ... ..... .. off:3 \
+ &azz_n n=2 rv=%mova_rv zn=%zn_ax2 zm=%zm_ax2
+@azz_4x4_o3 ........ ... ..... . .. ... ..... .. off:3 \
+ &azz_n n=4 rv=%mova_rv zn=%zn_ax4 zm=%zm_ax4
+
+ADD_azz_nn_s 11000001 101 ....0 0 .. 110 ....0 10 ... @azz_2x2_o3
+ADD_azz_nn_s 11000001 101 ...01 0 .. 110 ...00 10 ... @azz_4x4_o3
+ADD_azz_nn_d 11000001 111 ....0 0 .. 110 ....0 10 ... @azz_2x2_o3
+ADD_azz_nn_d 11000001 111 ...01 0 .. 110 ...00 10 ... @azz_4x4_o3
+
+SUB_azz_nn_s 11000001 101 ....0 0 .. 110 ....0 11 ... @azz_2x2_o3
+SUB_azz_nn_s 11000001 101 ...01 0 .. 110 ...00 11 ... @azz_4x4_o3
+SUB_azz_nn_d 11000001 111 ....0 0 .. 110 ....0 11 ... @azz_2x2_o3
+SUB_azz_nn_d 11000001 111 ...01 0 .. 110 ...00 11 ... @azz_4x4_o3
+
+@azz_2x2_o2x2 ........ ... ..... . .. ... ..... ... .. \
+ &azz_n n=2 rv=%mova_rv zn=%zn_ax2 zm=%zm_ax2 off=%off2_x2
+@azz_4x4_o2x2 ........ ... ..... . .. ... ..... ... .. \
+ &azz_n n=4 rv=%mova_rv zn=%zn_ax4 zm=%zm_ax4 off=%off2_x2
+
+FMLAL_nn 11000001 101 ....0 0 .. 010 ....0 000 .. @azz_2x2_o2x2
+FMLAL_nn 11000001 101 ...01 0 .. 010 ...00 000 .. @azz_4x4_o2x2
+
+FMLSL_nn 11000001 101 ....0 0 .. 010 ....0 010 .. @azz_2x2_o2x2
+FMLSL_nn 11000001 101 ...01 0 .. 010 ...00 010 .. @azz_4x4_o2x2
+
+BFMLAL_nn 11000001 101 ....0 0 .. 010 ....0 100 .. @azz_2x2_o2x2
+BFMLAL_nn 11000001 101 ...01 0 .. 010 ...00 100 .. @azz_4x4_o2x2
+
+BFMLSL_nn 11000001 101 ....0 0 .. 010 ....0 110 .. @azz_2x2_o2x2
+BFMLSL_nn 11000001 101 ...01 0 .. 010 ...00 110 .. @azz_4x4_o2x2
+
+FDOT_nn 11000001 101 ....0 0 .. 100 ....0 00 ... @azz_2x2_o3
+FDOT_nn 11000001 101 ...01 0 .. 100 ...00 00 ... @azz_4x4_o3
+
+BFDOT_nn 11000001 101 ....0 0 .. 100 ....0 10 ... @azz_2x2_o3
+BFDOT_nn 11000001 101 ...01 0 .. 100 ...00 10 ... @azz_4x4_o3
+
+USDOT_nn 11000001 101 ....0 0 .. 101 ....0 01 ... @azz_2x2_o3
+USDOT_nn 11000001 101 ...01 0 .. 101 ...00 01 ... @azz_4x4_o3
+
+SDOT_nn_4b 11000001 101 ....0 0 .. 101 ....0 00 ... @azz_2x2_o3
+SDOT_nn_4b 11000001 101 ...01 0 .. 101 ...00 00 ... @azz_4x4_o3
+SDOT_nn_4h 11000001 111 ....0 0 .. 101 ....0 00 ... @azz_2x2_o3
+SDOT_nn_4h 11000001 111 ...01 0 .. 101 ...00 00 ... @azz_4x4_o3
+SDOT_nn_2h 11000001 111 ....0 0 .. 101 ....0 01 ... @azz_2x2_o3
+SDOT_nn_2h 11000001 111 ...01 0 .. 101 ...00 01 ... @azz_4x4_o3
+
+UDOT_nn_4b 11000001 101 ....0 0 .. 101 ....0 10 ... @azz_2x2_o3
+UDOT_nn_4b 11000001 101 ...01 0 .. 101 ...00 10 ... @azz_4x4_o3
+UDOT_nn_4h 11000001 111 ....0 0 .. 101 ....0 10 ... @azz_2x2_o3
+UDOT_nn_4h 11000001 111 ...01 0 .. 101 ...00 10 ... @azz_4x4_o3
+UDOT_nn_2h 11000001 111 ....0 0 .. 101 ....0 11 ... @azz_2x2_o3
+UDOT_nn_2h 11000001 111 ...01 0 .. 101 ...00 11 ... @azz_4x4_o3
+
+SMLAL_nn 11000001 111 ....0 0 .. 010 ....0 000 .. @azz_2x2_o2x2
+SMLAL_nn 11000001 111 ...01 0 .. 010 ...00 000 .. @azz_4x4_o2x2
+
+SMLSL_nn 11000001 111 ....0 0 .. 010 ....0 010 .. @azz_2x2_o2x2
+SMLSL_nn 11000001 111 ...01 0 .. 010 ...00 010 .. @azz_4x4_o2x2
+
+UMLAL_nn 11000001 111 ....0 0 .. 010 ....0 100 .. @azz_2x2_o2x2
+UMLAL_nn 11000001 111 ...01 0 .. 010 ...00 100 .. @azz_4x4_o2x2
+
+UMLSL_nn 11000001 111 ....0 0 .. 010 ....0 110 .. @azz_2x2_o2x2
+UMLSL_nn 11000001 111 ...01 0 .. 010 ...00 110 .. @azz_4x4_o2x2
+
+@azz_2x2_o1x4 ........ ... ..... . .. ... ..... ... .. \
+ &azz_n n=2 rv=%mova_rv zn=%zn_ax2 zm=%zm_ax2 off=%off1_x4
+@azz_4x4_o1x4 ........ ... ..... . .. ... ..... ... .. \
+ &azz_n n=4 rv=%mova_rv zn=%zn_ax4 zm=%zm_ax4 off=%off1_x4
+
+SMLALL_nn_s 11000001 101 ....0 0 .. 000 ....0 0000 . @azz_2x2_o1x4
+SMLALL_nn_d 11000001 111 ....0 0 .. 000 ....0 0000 . @azz_2x2_o1x4
+SMLALL_nn_s 11000001 101 ...01 0 .. 000 ...00 0000 . @azz_4x4_o1x4
+SMLALL_nn_d 11000001 111 ...01 0 .. 000 ...00 0000 . @azz_4x4_o1x4
+
+SMLSLL_nn_s 11000001 101 ....0 0 .. 000 ....0 0100 . @azz_2x2_o1x4
+SMLSLL_nn_d 11000001 111 ....0 0 .. 000 ....0 0100 . @azz_2x2_o1x4
+SMLSLL_nn_s 11000001 101 ...01 0 .. 000 ...00 0100 . @azz_4x4_o1x4
+SMLSLL_nn_d 11000001 111 ...01 0 .. 000 ...00 0100 . @azz_4x4_o1x4
+
+UMLALL_nn_s 11000001 101 ....0 0 .. 000 ....0 1000 . @azz_2x2_o1x4
+UMLALL_nn_d 11000001 111 ....0 0 .. 000 ....0 1000 . @azz_2x2_o1x4
+UMLALL_nn_s 11000001 101 ...01 0 .. 000 ...00 1000 . @azz_4x4_o1x4
+UMLALL_nn_d 11000001 111 ...01 0 .. 000 ...00 1000 . @azz_4x4_o1x4
+
+UMLSLL_nn_s 11000001 101 ....0 0 .. 000 ....0 1100 . @azz_2x2_o1x4
+UMLSLL_nn_d 11000001 111 ....0 0 .. 000 ....0 1100 . @azz_2x2_o1x4
+UMLSLL_nn_s 11000001 101 ...01 0 .. 000 ...00 1100 . @azz_4x4_o1x4
+UMLSLL_nn_d 11000001 111 ...01 0 .. 000 ...00 1100 . @azz_4x4_o1x4
+
+USMLALL_nn_s 11000001 101 ....0 0 .. 000 ....0 0010 . @azz_2x2_o1x4
+USMLALL_nn_s 11000001 101 ...01 0 .. 000 ...00 0010 . @azz_4x4_o1x4
+
+BFMLA_nn 11000001 111 ....0 0 .. 100 ....0 01 ... @azz_2x2_o3
+FMLA_nn_h 11000001 101 ....0 0 .. 100 ....0 01 ... @azz_2x2_o3
+FMLA_nn_s 11000001 101 ....0 0 .. 110 ....0 00 ... @azz_2x2_o3
+FMLA_nn_d 11000001 111 ....0 0 .. 110 ....0 00 ... @azz_2x2_o3
+
+BFMLA_nn 11000001 111 ...01 0 .. 100 ...00 01 ... @azz_4x4_o3
+FMLA_nn_h 11000001 101 ...01 0 .. 100 ...00 01 ... @azz_4x4_o3
+FMLA_nn_s 11000001 101 ...01 0 .. 110 ...00 00 ... @azz_4x4_o3
+FMLA_nn_d 11000001 111 ...01 0 .. 110 ...00 00 ... @azz_4x4_o3
+
+BFMLS_nn 11000001 111 ....0 0 .. 100 ....0 11 ... @azz_2x2_o3
+FMLS_nn_h 11000001 101 ....0 0 .. 100 ....0 11 ... @azz_2x2_o3
+FMLS_nn_s 11000001 101 ....0 0 .. 110 ....0 01 ... @azz_2x2_o3
+FMLS_nn_d 11000001 111 ....0 0 .. 110 ....0 01 ... @azz_2x2_o3
+
+BFMLS_nn 11000001 111 ...01 0 .. 100 ...00 11 ... @azz_4x4_o3
+FMLS_nn_h 11000001 101 ...01 0 .. 100 ...00 11 ... @azz_4x4_o3
+FMLS_nn_s 11000001 101 ...01 0 .. 110 ...00 01 ... @azz_4x4_o3
+FMLS_nn_d 11000001 111 ...01 0 .. 110 ...00 01 ... @azz_4x4_o3
+
+&az_n n off rv zm
+@az_2x2_o3 ........ ... ..... . .. ... ..... .. off:3 \
+ &az_n n=2 rv=%mova_rv zm=%zn_ax2
+@az_4x4_o3 ........ ... ..... . .. ... ..... .. off:3 \
+ &az_n n=4 rv=%mova_rv zm=%zn_ax4
+
+FADD_nn_h 11000001 101 00100 0 .. 111 ....0 00 ... @az_2x2_o3
+FADD_nn_s 11000001 101 00000 0 .. 111 ....0 00 ... @az_2x2_o3
+FADD_nn_d 11000001 111 00000 0 .. 111 ....0 00 ... @az_2x2_o3
+FADD_nn_h 11000001 101 00101 0 .. 111 ...00 00 ... @az_4x4_o3
+FADD_nn_s 11000001 101 00001 0 .. 111 ...00 00 ... @az_4x4_o3
+FADD_nn_d 11000001 111 00001 0 .. 111 ...00 00 ... @az_4x4_o3
+
+FSUB_nn_h 11000001 101 00100 0 .. 111 ....0 01 ... @az_2x2_o3
+FSUB_nn_s 11000001 101 00000 0 .. 111 ....0 01 ... @az_2x2_o3
+FSUB_nn_d 11000001 111 00000 0 .. 111 ....0 01 ... @az_2x2_o3
+FSUB_nn_h 11000001 101 00101 0 .. 111 ...00 01 ... @az_4x4_o3
+FSUB_nn_s 11000001 101 00001 0 .. 111 ...00 01 ... @az_4x4_o3
+FSUB_nn_d 11000001 111 00001 0 .. 111 ...00 01 ... @az_4x4_o3
+
+BFADD_nn 11000001 111 00100 0 .. 111 ....0 00 ... @az_2x2_o3
+BFADD_nn 11000001 111 00101 0 .. 111 ...00 00 ... @az_4x4_o3
+BFSUB_nn 11000001 111 00100 0 .. 111 ....0 01 ... @az_2x2_o3
+BFSUB_nn 11000001 111 00101 0 .. 111 ...00 01 ... @az_4x4_o3
+
+### SME2 Multi-vector Indexed
+
+&azx_n n off rv zn zm idx
+
+%idx3_15_10 15:1 10:2
+%idx2_10_2 10:2 2:1
+
+@azx_1x1_o3x2 ........ .... zm:4 . .. . .. zn:5 .. ... \
+ &azx_n n=1 rv=%mova_rv off=%off3_x2 idx=%idx3_15_10
+@azx_2x1_o2x2 ........ .... zm:4 . .. . .. ..... .. ... \
+ &azx_n n=2 rv=%mova_rv off=%off2_x2 zn=%zn_ax2 idx=%idx2_10_2
+@azx_4x1_o2x2 ........ .... zm:4 . .. . .. ..... .. ... \
+ &azx_n n=4 rv=%mova_rv off=%off2_x2 zn=%zn_ax4 idx=%idx2_10_2
+
+FMLAL_nx 11000001 1000 .... . .. 1 .. ..... 00 ... @azx_1x1_o3x2
+FMLAL_nx 11000001 1001 .... 0 .. 1 .. ....0 00 ... @azx_2x1_o2x2
+FMLAL_nx 11000001 1001 .... 1 .. 1 .. ...00 00 ... @azx_4x1_o2x2
+
+FMLSL_nx 11000001 1000 .... . .. 1 .. ..... 01 ... @azx_1x1_o3x2
+FMLSL_nx 11000001 1001 .... 0 .. 1 .. ....0 01 ... @azx_2x1_o2x2
+FMLSL_nx 11000001 1001 .... 1 .. 1 .. ...00 01 ... @azx_4x1_o2x2
+
+BFMLAL_nx 11000001 1000 .... . .. 1 .. ..... 10 ... @azx_1x1_o3x2
+BFMLAL_nx 11000001 1001 .... 0 .. 1 .. ....0 10 ... @azx_2x1_o2x2
+BFMLAL_nx 11000001 1001 .... 1 .. 1 .. ...00 10 ... @azx_4x1_o2x2
+
+BFMLSL_nx 11000001 1000 .... . .. 1 .. ..... 11 ... @azx_1x1_o3x2
+BFMLSL_nx 11000001 1001 .... 0 .. 1 .. ....0 11 ... @azx_2x1_o2x2
+BFMLSL_nx 11000001 1001 .... 1 .. 1 .. ...00 11 ... @azx_4x1_o2x2
+
+@azx_2x1_i2_o3 ........ .... zm:4 . .. . idx:2 .... ... off:3 \
+ &azx_n n=2 rv=%mova_rv zn=%zn_ax2
+@azx_4x1_i2_o3 ........ .... zm:4 . .. . idx:2 .... ... off:3 \
+ &azx_n n=4 rv=%mova_rv zn=%zn_ax4
+@azx_2x1_i1_o3 ........ .... zm:4 . .. .. idx:1 .... ... off:3 \
+ &azx_n n=2 rv=%mova_rv zn=%zn_ax2
+@azx_4x1_i1_o3 ........ .... zm:4 . .. .. idx:1 .... ... off:3 \
+ &azx_n n=4 rv=%mova_rv zn=%zn_ax4
+
+FDOT_nx 11000001 0101 .... 0 .. 1 .. ....0 01 ... @azx_2x1_i2_o3
+FDOT_nx 11000001 0101 .... 1 .. 1 .. ...00 01 ... @azx_4x1_i2_o3
+
+BFDOT_nx 11000001 0101 .... 0 .. 1 .. ....0 11 ... @azx_2x1_i2_o3
+BFDOT_nx 11000001 0101 .... 1 .. 1 .. ...00 11 ... @azx_4x1_i2_o3
+
+FVDOT 11000001 0101 .... 0 .. 0 .. ....0 01 ... @azx_2x1_i2_o3
+BFVDOT 11000001 0101 .... 0 .. 0 .. ....0 11 ... @azx_2x1_i2_o3
+
+SDOT_nx_2h 11000001 0101 .... 0 .. 1 .. ....0 00 ... @azx_2x1_i2_o3
+SDOT_nx_2h 11000001 0101 .... 1 .. 1 .. ...00 00 ... @azx_4x1_i2_o3
+SDOT_nx_4b 11000001 0101 .... 0 .. 1 .. ....1 00 ... @azx_2x1_i2_o3
+SDOT_nx_4b 11000001 0101 .... 1 .. 1 .. ...01 00 ... @azx_4x1_i2_o3
+SDOT_nx_4h 11000001 1101 .... 0 .. 00 . ....0 01 ... @azx_2x1_i1_o3
+SDOT_nx_4h 11000001 1101 .... 1 .. 00 . ...00 01 ... @azx_4x1_i1_o3
+
+UDOT_nx_2h 11000001 0101 .... 0 .. 1 .. ....0 10 ... @azx_2x1_i2_o3
+UDOT_nx_2h 11000001 0101 .... 1 .. 1 .. ...00 10 ... @azx_4x1_i2_o3
+UDOT_nx_4b 11000001 0101 .... 0 .. 1 .. ....1 10 ... @azx_2x1_i2_o3
+UDOT_nx_4b 11000001 0101 .... 1 .. 1 .. ...01 10 ... @azx_4x1_i2_o3
+UDOT_nx_4h 11000001 1101 .... 0 .. 00 . ....0 11 ... @azx_2x1_i1_o3
+UDOT_nx_4h 11000001 1101 .... 1 .. 00 . ...00 11 ... @azx_4x1_i1_o3
+
+USDOT_nx 11000001 0101 .... 0 .. 1 .. ....1 01 ... @azx_2x1_i2_o3
+USDOT_nx 11000001 0101 .... 1 .. 1 .. ...01 01 ... @azx_4x1_i2_o3
+
+SUDOT_nx 11000001 0101 .... 0 .. 1 .. ....1 11 ... @azx_2x1_i2_o3
+SUDOT_nx 11000001 0101 .... 1 .. 1 .. ...01 11 ... @azx_4x1_i2_o3
+
+SVDOT_nx_2h 11000001 0101 .... 0 .. 0 .. ....1 00 ... @azx_2x1_i2_o3
+SVDOT_nx_4b 11000001 0101 .... 1 .. 0 .. ...01 00 ... @azx_4x1_i2_o3
+SVDOT_nx_4h 11000001 1101 .... 1 .. 01 . ...00 01 ... @azx_4x1_i1_o3
+
+UVDOT_nx_2h 11000001 0101 .... 0 .. 0 .. ....1 10 ... @azx_2x1_i2_o3
+UVDOT_nx_4b 11000001 0101 .... 1 .. 0 .. ...01 10 ... @azx_4x1_i2_o3
+UVDOT_nx_4h 11000001 1101 .... 1 .. 01 . ...00 11 ... @azx_4x1_i1_o3
+
+SUVDOT_nx_4b 11000001 0101 .... 1 .. 0 .. ...01 11 ... @azx_4x1_i2_o3
+USVDOT_nx_4b 11000001 0101 .... 1 .. 0 .. ...01 01 ... @azx_4x1_i2_o3
+
+SMLAL_nx 11000001 1100 .... . .. 1 .. ..... 00 ... @azx_1x1_o3x2
+SMLAL_nx 11000001 1101 .... 0 .. 1 .. ....0 00 ... @azx_2x1_o2x2
+SMLAL_nx 11000001 1101 .... 1 .. 1 .. ...00 00 ... @azx_4x1_o2x2
+
+SMLSL_nx 11000001 1100 .... . .. 1 .. ..... 01 ... @azx_1x1_o3x2
+SMLSL_nx 11000001 1101 .... 0 .. 1 .. ....0 01 ... @azx_2x1_o2x2
+SMLSL_nx 11000001 1101 .... 1 .. 1 .. ...00 01 ... @azx_4x1_o2x2
+
+UMLAL_nx 11000001 1100 .... . .. 1 .. ..... 10 ... @azx_1x1_o3x2
+UMLAL_nx 11000001 1101 .... 0 .. 1 .. ....0 10 ... @azx_2x1_o2x2
+UMLAL_nx 11000001 1101 .... 1 .. 1 .. ...00 10 ... @azx_4x1_o2x2
+
+UMLSL_nx 11000001 1100 .... . .. 1 .. ..... 11 ... @azx_1x1_o3x2
+UMLSL_nx 11000001 1101 .... 0 .. 1 .. ....0 11 ... @azx_2x1_o2x2
+UMLSL_nx 11000001 1101 .... 1 .. 1 .. ...00 11 ... @azx_4x1_o2x2
+
+%idx4_15_10 15:1 10:3
+%idx4_10_1 10:2 1:2
+%idx3_10_1 10:1 1:2
+
+@azx_1x1_i4_o2 ........ .... zm:4 . .. ... zn:5 ... .. \
+ &azx_n n=1 rv=%mova_rv off=%off2_x4 idx=%idx4_15_10
+@azx_1x1_i3_o2 ........ .... zm:4 . .. ... zn:5 ... .. \
+ &azx_n n=1 rv=%mova_rv off=%off2_x4 idx=%idx3_15_10
+@azx_2x1_i4_o1 ........ .... zm:4 . .. ... ..... ... .. \
+ &azx_n n=2 rv=%mova_rv off=%off1_x4 zn=%zn_ax2 idx=%idx4_10_1
+@azx_2x1_i3_o1 ........ .... zm:4 . .. ... ..... ... .. \
+ &azx_n n=2 rv=%mova_rv off=%off1_x4 zn=%zn_ax2 idx=%idx3_10_1
+@azx_4x1_i4_o1 ........ .... zm:4 . .. ... ..... ... .. \
+ &azx_n n=4 rv=%mova_rv off=%off1_x4 zn=%zn_ax4 idx=%idx4_10_1
+@azx_4x1_i3_o1 ........ .... zm:4 . .. ... ..... ... .. \
+ &azx_n n=4 rv=%mova_rv off=%off1_x4 zn=%zn_ax4 idx=%idx3_10_1
+
+SMLALL_nx_s 11000001 0000 .... . .. ... ..... 000 .. @azx_1x1_i4_o2
+SMLALL_nx_d 11000001 1000 .... . .. 0.. ..... 000 .. @azx_1x1_i3_o2
+SMLALL_nx_s 11000001 0001 .... 0 .. 0.. ....0 00 ... @azx_2x1_i4_o1
+SMLALL_nx_d 11000001 1001 .... 0 .. 00. ....0 00 ... @azx_2x1_i3_o1
+SMLALL_nx_s 11000001 0001 .... 1 .. 0.. ...00 00 ... @azx_4x1_i4_o1
+SMLALL_nx_d 11000001 1001 .... 1 .. 00. ...00 00 ... @azx_4x1_i3_o1
+
+SMLSLL_nx_s 11000001 0000 .... . .. ... ..... 010 .. @azx_1x1_i4_o2
+SMLSLL_nx_d 11000001 1000 .... . .. 0.. ..... 010 .. @azx_1x1_i3_o2
+SMLSLL_nx_s 11000001 0001 .... 0 .. 0.. ....0 01 ... @azx_2x1_i4_o1
+SMLSLL_nx_d 11000001 1001 .... 0 .. 00. ....0 01 ... @azx_2x1_i3_o1
+SMLSLL_nx_s 11000001 0001 .... 1 .. 0.. ...00 01 ... @azx_4x1_i4_o1
+SMLSLL_nx_d 11000001 1001 .... 1 .. 00. ...00 01 ... @azx_4x1_i3_o1
+
+UMLALL_nx_s 11000001 0000 .... . .. ... ..... 100 .. @azx_1x1_i4_o2
+UMLALL_nx_d 11000001 1000 .... . .. 0.. ..... 100 .. @azx_1x1_i3_o2
+UMLALL_nx_s 11000001 0001 .... 0 .. 0.. ....0 10 ... @azx_2x1_i4_o1
+UMLALL_nx_d 11000001 1001 .... 0 .. 00. ....0 10 ... @azx_2x1_i3_o1
+UMLALL_nx_s 11000001 0001 .... 1 .. 0.. ...00 10 ... @azx_4x1_i4_o1
+UMLALL_nx_d 11000001 1001 .... 1 .. 00. ...00 10 ... @azx_4x1_i3_o1
+
+UMLSLL_nx_s 11000001 0000 .... . .. ... ..... 110 .. @azx_1x1_i4_o2
+UMLSLL_nx_d 11000001 1000 .... . .. 0.. ..... 110 .. @azx_1x1_i3_o2
+UMLSLL_nx_s 11000001 0001 .... 0 .. 0.. ....0 11 ... @azx_2x1_i4_o1
+UMLSLL_nx_d 11000001 1001 .... 0 .. 00. ....0 11 ... @azx_2x1_i3_o1
+UMLSLL_nx_s 11000001 0001 .... 1 .. 0.. ...00 11 ... @azx_4x1_i4_o1
+UMLSLL_nx_d 11000001 1001 .... 1 .. 00. ...00 11 ... @azx_4x1_i3_o1
+
+USMLALL_nx_s 11000001 0000 .... . .. ... ..... 001 .. @azx_1x1_i4_o2
+USMLALL_nx_s 11000001 0001 .... 0 .. 0.. ....1 00 ... @azx_2x1_i4_o1
+USMLALL_nx_s 11000001 0001 .... 1 .. 0.. ...01 00 ... @azx_4x1_i4_o1
+
+SUMLALL_nx_s 11000001 0000 .... . .. ... ..... 101 .. @azx_1x1_i4_o2
+SUMLALL_nx_s 11000001 0001 .... 0 .. 0.. ....1 10 ... @azx_2x1_i4_o1
+SUMLALL_nx_s 11000001 0001 .... 1 .. 0.. ...01 10 ... @azx_4x1_i4_o1
+
+%idx3_10_3 10:2 3:1
+@azx_2x1_i3_o3 ........ .... zm:4 . .. ... ..... .. off:3 \
+ &azx_n n=2 rv=%mova_rv zn=%zn_ax2 idx=%idx3_10_3
+@azx_4x1_i3_o3 ........ .... zm:4 . .. ... ..... .. off:3 \
+ &azx_n n=4 rv=%mova_rv zn=%zn_ax4 idx=%idx3_10_3
+@azx_2x1_i2_o3 ........ .... zm:4 . .. . idx:2 ..... .. off:3 \
+ &azx_n n=2 rv=%mova_rv zn=%zn_ax2
+@azx_4x1_i2_o3 ........ .... zm:4 . .. . idx:2 ..... .. off:3 \
+ &azx_n n=4 rv=%mova_rv zn=%zn_ax4
+@azx_2x1_i1_o3 ........ .... zm:4 . .. .. idx:1 ..... .. off:3 \
+ &azx_n n=2 rv=%mova_rv zn=%zn_ax2
+@azx_4x1_i1_o3 ........ .... zm:4 . .. .. idx:1 ..... .. off:3 \
+ &azx_n n=4 rv=%mova_rv zn=%zn_ax4
+
+BFMLA_nx 11000001 0001 .... 0 .. 1.. ....1 0 .... @azx_2x1_i3_o3
+FMLA_nx_h 11000001 0001 .... 0 .. 1.. ....0 0 .... @azx_2x1_i3_o3
+FMLA_nx_s 11000001 0101 .... 0 .. 0.. ....0 00 ... @azx_2x1_i2_o3
+FMLA_nx_d 11000001 1101 .... 0 .. 00. ....0 00 ... @azx_2x1_i1_o3
+
+BFMLA_nx 11000001 0001 .... 1 .. 1.. ...01 0 .... @azx_4x1_i3_o3
+FMLA_nx_h 11000001 0001 .... 1 .. 1.. ...00 0 .... @azx_4x1_i3_o3
+FMLA_nx_s 11000001 0101 .... 1 .. 0.. ...00 00 ... @azx_4x1_i2_o3
+FMLA_nx_d 11000001 1101 .... 1 .. 00. ...00 00 ... @azx_4x1_i1_o3
+
+BFMLS_nx 11000001 0001 .... 0 .. 1.. ....1 1 .... @azx_2x1_i3_o3
+FMLS_nx_h 11000001 0001 .... 0 .. 1.. ....0 1 .... @azx_2x1_i3_o3
+FMLS_nx_s 11000001 0101 .... 0 .. 0.. ....0 10 ... @azx_2x1_i2_o3
+FMLS_nx_d 11000001 1101 .... 0 .. 00. ....0 10 ... @azx_2x1_i1_o3
+
+BFMLS_nx 11000001 0001 .... 1 .. 1.. ...01 1 .... @azx_4x1_i3_o3
+FMLS_nx_h 11000001 0001 .... 1 .. 1.. ...00 1 .... @azx_4x1_i3_o3
+FMLS_nx_s 11000001 0101 .... 1 .. 0.. ...00 10 ... @azx_4x1_i2_o3
+FMLS_nx_d 11000001 1101 .... 1 .. 00. ...00 10 ... @azx_4x1_i1_o3
+
+### SME2 Add / Sub array accumulators
+
+ADD_aaz_s 11000001 101 000000 .. 111 ....0 10 ... @az_2x2_o3
+ADD_aaz_s 11000001 101 000010 .. 111 ...00 10 ... @az_4x4_o3
+ADD_aaz_d 11000001 111 000000 .. 111 ....0 10 ... @az_2x2_o3
+ADD_aaz_d 11000001 111 000010 .. 111 ...00 10 ... @az_4x4_o3
+
+SUB_aaz_s 11000001 101 000000 .. 111 ....0 11 ... @az_2x2_o3
+SUB_aaz_s 11000001 101 000010 .. 111 ...00 11 ... @az_4x4_o3
+SUB_aaz_d 11000001 111 000000 .. 111 ....0 11 ... @az_2x2_o3
+SUB_aaz_d 11000001 111 000010 .. 111 ...00 11 ... @az_4x4_o3
+
+### SME2 Multi-vector SVE Constructive Unary
+
+&zz_e zd zn esz
+&zz_n zd zn n
+@zz_1x2 ........ ... ..... ...... ..... zd:5 \
+ &zz_n n=1 zn=%zn_ax2
+@zz_1x4 ........ ... ..... ...... ..... zd:5 \
+ &zz_n n=1 zn=%zn_ax4
+@zz_2x1 ........ ... ..... ...... zn:5 ..... \
+ &zz_n n=1 zd=%zd_ax2
+@zz_2x2 ........ ... ..... ...... .... . ..... \
+ &zz_n n=2 zd=%zd_ax2 zn=%zn_ax2
+@zz_4x4 ........ ... ..... ...... .... . ..... \
+ &zz_n n=4 zd=%zd_ax4 zn=%zn_ax4
+@zz_4x2_n1 ........ ... ..... ...... .... . ..... \
+ &zz_n n=1 zd=%zd_ax4 zn=%zn_ax2
+
+BFCVT 11000001 011 00000 111000 ....0 ..... @zz_1x2
+BFCVTN 11000001 011 00000 111000 ....1 ..... @zz_1x2
+
+FCVT_n 11000001 001 00000 111000 ....0 ..... @zz_1x2
+FCVTN 11000001 001 00000 111000 ....1 ..... @zz_1x2
+
+FCVT_w 11000001 101 00000 111000 ..... ....0 @zz_2x1
+FCVTL 11000001 101 00000 111000 ..... ....1 @zz_2x1
+
+FCVTZS 11000001 001 00001 111000 ....0 ....0 @zz_2x2
+FCVTZS 11000001 001 10001 111000 ...00 ...00 @zz_4x4
+FCVTZU 11000001 001 00001 111000 ....1 ....0 @zz_2x2
+FCVTZU 11000001 001 10001 111000 ...01 ...00 @zz_4x4
+
+SCVTF 11000001 001 00010 111000 ....0 ....0 @zz_2x2
+SCVTF 11000001 001 10010 111000 ...00 ...00 @zz_4x4
+UCVTF 11000001 001 00010 111000 ....1 ....0 @zz_2x2
+UCVTF 11000001 001 10010 111000 ...01 ...00 @zz_4x4
+
+FRINTN 11000001 101 01000 111000 ....0 ....0 @zz_2x2
+FRINTN 11000001 101 11000 111000 ...00 ...00 @zz_4x4
+FRINTP 11000001 101 01001 111000 ....0 ....0 @zz_2x2
+FRINTP 11000001 101 11001 111000 ...00 ...00 @zz_4x4
+FRINTM 11000001 101 01010 111000 ....0 ....0 @zz_2x2
+FRINTM 11000001 101 11010 111000 ...00 ...00 @zz_4x4
+FRINTA 11000001 101 01100 111000 ....0 ....0 @zz_2x2
+FRINTA 11000001 101 11100 111000 ...00 ...00 @zz_4x4
+
+SQCVT_sh 11000001 001 00011 111000 ....0 ..... @zz_1x2
+UQCVT_sh 11000001 001 00011 111000 ....1 ..... @zz_1x2
+SQCVTU_sh 11000001 011 00011 111000 ....0 ..... @zz_1x2
+
+SQCVT_sb 11000001 001 10011 111000 ...00 ..... @zz_1x4
+UQCVT_sb 11000001 001 10011 111000 ...01 ..... @zz_1x4
+SQCVTU_sb 11000001 011 10011 111000 ...00 ..... @zz_1x4
+
+SQCVT_dh 11000001 101 10011 111000 ...00 ..... @zz_1x4
+UQCVT_dh 11000001 101 10011 111000 ...01 ..... @zz_1x4
+SQCVTU_dh 11000001 111 10011 111000 ...00 ..... @zz_1x4
+
+SQCVTN_sb 11000001 001 10011 111000 ...10 ..... @zz_1x4
+UQCVTN_sb 11000001 001 10011 111000 ...11 ..... @zz_1x4
+SQCVTUN_sb 11000001 011 10011 111000 ...10 ..... @zz_1x4
+
+SQCVTN_dh 11000001 101 10011 111000 ...10 ..... @zz_1x4
+UQCVTN_dh 11000001 101 10011 111000 ...11 ..... @zz_1x4
+SQCVTUN_dh 11000001 111 10011 111000 ...10 ..... @zz_1x4
+
+SUNPK_2bh 11000001 011 00101 111000 ..... ....0 @zz_2x1
+SUNPK_2hs 11000001 101 00101 111000 ..... ....0 @zz_2x1
+SUNPK_2sd 11000001 111 00101 111000 ..... ....0 @zz_2x1
+
+UUNPK_2bh 11000001 011 00101 111000 ..... ....1 @zz_2x1
+UUNPK_2hs 11000001 101 00101 111000 ..... ....1 @zz_2x1
+UUNPK_2sd 11000001 111 00101 111000 ..... ....1 @zz_2x1
+
+SUNPK_4bh 11000001 011 10101 111000 ....0 ...00 @zz_4x2_n1
+SUNPK_4hs 11000001 101 10101 111000 ....0 ...00 @zz_4x2_n1
+SUNPK_4sd 11000001 111 10101 111000 ....0 ...00 @zz_4x2_n1
+
+UUNPK_4bh 11000001 011 10101 111000 ....0 ...01 @zz_4x2_n1
+UUNPK_4hs 11000001 101 10101 111000 ....0 ...01 @zz_4x2_n1
+UUNPK_4sd 11000001 111 10101 111000 ....0 ...01 @zz_4x2_n1
+
+ZIP_4 11000001 esz:2 1 10110 111000 ...00 ... 00 \
+ &zz_e zd=%zd_ax4 zn=%zn_ax4
+ZIP_4 11000001 001 10111 111000 ...00 ... 00 \
+ &zz_e esz=4 zd=%zd_ax4 zn=%zn_ax4
+
+UZP_4 11000001 esz:2 1 10110 111000 ...00 ... 10 \
+ &zz_e zd=%zd_ax4 zn=%zn_ax4
+UZP_4 11000001 001 10111 111000 ...00 ... 10 \
+ &zz_e esz=4 zd=%zd_ax4 zn=%zn_ax4
+
+### SME2 Multi-vector SVE Constructive Binary
+
+&rshr zd zn shift
+
+%rshr_sh_shift 16:4 !function=rsub_16
+%rshr_sb_shift 16:5 !function=rsub_32
+%rshr_dh_shift 22:1 16:5 !function=rsub_64
+
+@rshr_sh ........ .... .... ...... ..... zd:5 \
+ &rshr zn=%zn_ax2 shift=%rshr_sh_shift
+@rshr_sb ........ ... ..... ...... ..... zd:5 \
+ &rshr zn=%zn_ax4 shift=%rshr_sb_shift
+@rshr_dh ........ ... ..... ...... ..... zd:5 \
+ &rshr zn=%zn_ax4 shift=%rshr_dh_shift
+
+SQRSHR_sh 11000001 1110 .... 110101 ....0 ..... @rshr_sh
+UQRSHR_sh 11000001 1110 .... 110101 ....1 ..... @rshr_sh
+SQRSHRU_sh 11000001 1111 .... 110101 ....0 ..... @rshr_sh
+
+SQRSHR_sb 11000001 011 ..... 110110 ...00 ..... @rshr_sb
+SQRSHR_dh 11000001 1.1 ..... 110110 ...00 ..... @rshr_dh
+UQRSHR_sb 11000001 011 ..... 110110 ...01 ..... @rshr_sb
+UQRSHR_dh 11000001 1.1 ..... 110110 ...01 ..... @rshr_dh
+SQRSHRU_sb 11000001 011 ..... 110110 ...10 ..... @rshr_sb
+SQRSHRU_dh 11000001 1.1 ..... 110110 ...10 ..... @rshr_dh
+
+SQRSHRN_sh 01000101 1011 .... 001010 ....0 ..... @rshr_sh
+UQRSHRN_sh 01000101 1011 .... 001110 ....0 ..... @rshr_sh
+SQRSHRUN_sh 01000101 1011 .... 000010 ....0 ..... @rshr_sh
+
+SQRSHRN_sb 11000001 011 ..... 110111 ...00 ..... @rshr_sb
+SQRSHRN_dh 11000001 1.1 ..... 110111 ...00 ..... @rshr_dh
+UQRSHRN_sb 11000001 011 ..... 110111 ...01 ..... @rshr_sb
+UQRSHRN_dh 11000001 1.1 ..... 110111 ...01 ..... @rshr_dh
+SQRSHRUN_sb 11000001 011 ..... 110111 ...10 ..... @rshr_sb
+SQRSHRUN_dh 11000001 1.1 ..... 110111 ...10 ..... @rshr_dh
+
+&zzz_e zd zn zm esz
+
+ZIP_2 11000001 esz:2 1 zm:5 110100 zn:5 .... 0 \
+ &zzz_e zd=%zd_ax2
+ZIP_2 11000001 00 1 zm:5 110101 zn:5 .... 0 \
+ &zzz_e zd=%zd_ax2 esz=4
+
+UZP_2 11000001 esz:2 1 zm:5 110100 zn:5 .... 1 \
+ &zzz_e zd=%zd_ax2
+UZP_2 11000001 00 1 zm:5 110101 zn:5 .... 1 \
+ &zzz_e zd=%zd_ax2 esz=4
+
+&zzz_en zd zn zm esz n
+
+FCLAMP 11000001 esz:2 1 zm:5 110000 zn:5 .... 0 \
+ &zzz_en zd=%zd_ax2 n=2
+FCLAMP 11000001 esz:2 1 zm:5 110010 zn:5 ...0 0 \
+ &zzz_en zd=%zd_ax4 n=4
+
+SCLAMP 11000001 esz:2 1 zm:5 110001 zn:5 .... 0 \
+ &zzz_en zd=%zd_ax2 n=2
+SCLAMP 11000001 esz:2 1 zm:5 110011 zn:5 ...0 0 \
+ &zzz_en zd=%zd_ax4 n=4
+
+UCLAMP 11000001 esz:2 1 zm:5 110001 zn:5 .... 1 \
+ &zzz_en zd=%zd_ax2 n=2
+UCLAMP 11000001 esz:2 1 zm:5 110011 zn:5 ...0 1 \
+ &zzz_en zd=%zd_ax4 n=4
+
+### SME2 Multi-vector SVE Select
+
+%sel_pg 10:3 !function=plus_8
+
+SEL 11000001 esz:2 1 ....0 100 ... ....0 ....0 \
+ n=2 zd=%zd_ax2 zn=%zn_ax2 zm=%zm_ax2 pg=%sel_pg
+SEL 11000001 esz:2 1 ...01 100 ... ...00 ...00 \
+ n=4 zd=%zd_ax4 zn=%zn_ax4 zm=%zm_ax4 pg=%sel_pg
+
+### SME Multiple Zero
+
+&zero_za rv off ngrp nvec
+
+ZERO_za 11000000 000011 000 .. 0000000000 off:3 \
+ &zero_za ngrp=2 nvec=1 rv=%mova_rv
+ZERO_za 11000000 000011 100 .. 0000000000 off:3 \
+ &zero_za ngrp=4 nvec=1 rv=%mova_rv
+
+ZERO_za 11000000 000011 001 .. 0000000000 ... \
+ &zero_za ngrp=1 nvec=2 rv=%mova_rv off=%off3_x2
+ZERO_za 11000000 000011 010 .. 0000000000 0.. \
+ &zero_za ngrp=2 nvec=2 rv=%mova_rv off=%off2_x2
+ZERO_za 11000000 000011 011 .. 0000000000 0.. \
+ &zero_za ngrp=4 nvec=2 rv=%mova_rv off=%off2_x2
+
+ZERO_za 11000000 000011 101 .. 0000000000 0.. \
+ &zero_za ngrp=1 nvec=4 rv=%mova_rv off=%off2_x4
+ZERO_za 11000000 000011 110 .. 0000000000 00. \
+ &zero_za ngrp=2 nvec=4 rv=%mova_rv off=%off1_x4
+ZERO_za 11000000 000011 111 .. 0000000000 00. \
+ &zero_za ngrp=4 nvec=4 rv=%mova_rv off=%off1_x4
+
+### SME Lookup Table Read
+
+&lut zd zn idx
+
+# LUTI2, consecutive
+LUTI2_c_1b 1100 0000 1100 11 idx:4 00 00 zn:5 zd:5 &lut
+LUTI2_c_1h 1100 0000 1100 11 idx:4 01 00 zn:5 zd:5 &lut
+LUTI2_c_1s 1100 0000 1100 11 idx:4 10 00 zn:5 zd:5 &lut
+
+LUTI2_c_2b 1100 0000 1000 11 idx:3 1 00 00 zn:5 .... 0 &lut zd=%zd_ax2
+LUTI2_c_2h 1100 0000 1000 11 idx:3 1 01 00 zn:5 .... 0 &lut zd=%zd_ax2
+LUTI2_c_2s 1100 0000 1000 11 idx:3 1 10 00 zn:5 .... 0 &lut zd=%zd_ax2
+
+LUTI2_c_4b 1100 0000 1000 11 idx:2 10 00 00 zn:5 ... 00 &lut zd=%zd_ax4
+LUTI2_c_4h 1100 0000 1000 11 idx:2 10 01 00 zn:5 ... 00 &lut zd=%zd_ax4
+LUTI2_c_4s 1100 0000 1000 11 idx:2 10 10 00 zn:5 ... 00 &lut zd=%zd_ax4
+
+# LUTI2, strided (must check zd alignment)
+LUTI2_s_2b 1100 0000 1001 11 idx:3 1 00 00 zn:5 zd:5 &lut
+LUTI2_s_2h 1100 0000 1001 11 idx:3 1 01 00 zn:5 zd:5 &lut
+
+LUTI2_s_4b 1100 0000 1001 11 idx:2 10 00 00 zn:5 zd:5 &lut
+LUTI2_s_4h 1100 0000 1001 11 idx:2 10 01 00 zn:5 zd:5 &lut
+
+# LUTI4, consecutive
+LUTI4_c_1b 1100 0000 1100 101 idx:3 00 00 zn:5 zd:5 &lut
+LUTI4_c_1h 1100 0000 1100 101 idx:3 01 00 zn:5 zd:5 &lut
+LUTI4_c_1s 1100 0000 1100 101 idx:3 10 00 zn:5 zd:5 &lut
+
+LUTI4_c_2b 1100 0000 1000 101 idx:2 1 00 00 zn:5 .... 0 &lut zd=%zd_ax2
+LUTI4_c_2h 1100 0000 1000 101 idx:2 1 01 00 zn:5 .... 0 &lut zd=%zd_ax2
+LUTI4_c_2s 1100 0000 1000 101 idx:2 1 10 00 zn:5 .... 0 &lut zd=%zd_ax2
+
+LUTI4_c_4h 1100 0000 1000 101 idx:1 10 01 00 zn:5 ... 00 &lut zd=%zd_ax4
+LUTI4_c_4s 1100 0000 1000 101 idx:1 10 10 00 zn:5 ... 00 &lut zd=%zd_ax4
+
+# LUTI4, strided (must check zd alignment)
+LUTI4_s_2b 1100 0000 1001 101 idx:2 1 00 00 zn:5 zd:5 &lut
+LUTI4_s_2h 1100 0000 1001 101 idx:2 1 01 00 zn:5 zd:5 &lut
+
+LUTI4_s_4h 1100 0000 1001 101 idx:1 10 01 00 zn:5 zd:5 &lut
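
A note on the decodetree syntax used throughout this file: a "%name pos:len [pos:len ...]" line composes a named field from one or more bit spans (the first span is the most significant), and "!function=" applies a transform to the extracted value. A minimal C rendering of two of the fields above, not part of the patch, assuming a simplified extract32() and that rsub_16() is the usual "16 - x" immediate transform from the translator:

    #include <stdint.h>

    /* Simplified extract32(), valid for 0 < length < 32. */
    static uint32_t extract32(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0u >> (32 - length));
    }

    /* %idx4_15_10 15:1 10:3 -- insn[15] is the high bit, insn[12:10] the rest. */
    static int idx4_15_10(uint32_t insn)
    {
        return (extract32(insn, 15, 1) << 3) | extract32(insn, 10, 3);
    }

    /* %rshr_sh_shift 16:4 !function=rsub_16 -- shift amount is 16 - imm4. */
    static int rshr_sh_shift(uint32_t insn)
    {
        return 16 - extract32(insn, 16, 4);
    }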
diff --git a/target/arm/tcg/sme_helper.c b/target/arm/tcg/sme_helper.c
index dcc48e4..bb8ed1e 100644
--- a/target/arm/tcg/sme_helper.c
+++ b/target/arm/tcg/sme_helper.c
@@ -22,13 +22,20 @@
#include "internals.h"
#include "tcg/tcg-gvec-desc.h"
#include "exec/helper-proto.h"
-#include "exec/cpu_ldst.h"
-#include "exec/exec-all.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/helper-retaddr.h"
#include "qemu/int128.h"
#include "fpu/softfloat.h"
#include "vec_internal.h"
#include "sve_ldst_internal.h"
+
+static bool vectors_overlap(ARMVectorReg *x, unsigned nx,
+ ARMVectorReg *y, unsigned ny)
+{
+ return !(x + nx <= y || y + ny <= x);
+}
+
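vectors_overlap() is the standard half-open interval test: [x, x+nx) and [y, y+ny) are disjoint exactly when one range ends at or before the other begins. The SME2 helpers below use it to decide when a multi-register result must be staged in a scratch buffer. A standalone sketch with plain indices (illustration only, not part of the patch):

    #include <assert.h>
    #include <stdbool.h>

    static bool ranges_overlap(unsigned x, unsigned nx, unsigned y, unsigned ny)
    {
        return !(x + nx <= y || y + ny <= x);
    }

    int main(void)
    {
        assert(!ranges_overlap(0, 2, 2, 1)); /* Z0-Z1 vs Z2: disjoint */
        assert(ranges_overlap(1, 2, 2, 1));  /* Z1-Z2 vs Z2: overlap */
        return 0;
    }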
void helper_set_svcr(CPUARMState *env, uint32_t val, uint32_t mask)
{
aarch64_set_svcr(env, val, mask);
@@ -39,12 +46,12 @@ void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
uint32_t i;
/*
- * Special case clearing the entire ZA space.
+ * Special case clearing the entire ZA array.
* This falls into the CONSTRAINED UNPREDICTABLE zeroing of any
* parts of the ZA storage outside of SVL.
*/
if (imm == 0xff) {
- memset(env->zarray, 0, sizeof(env->zarray));
+ memset(env->za_state.za, 0, sizeof(env->za_state.za));
return;
}
@@ -54,7 +61,7 @@ void helper_sme_zero(CPUARMState *env, uint32_t imm, uint32_t svl)
*/
for (i = 0; i < svl; i++) {
if (imm & (1 << (i % 8))) {
- memset(&env->zarray[i], 0, svl);
+ memset(&env->za_state.za[i], 0, svl);
}
}
}
@@ -206,6 +213,110 @@ void HELPER(sme_mova_zc_q)(void *vd, void *za, void *vg, uint32_t desc)
#undef DO_MOVA_Z
+void HELPER(sme2_mova_zc_b)(void *vdst, void *vsrc, uint32_t desc)
+{
+ const uint8_t *src = vsrc;
+ uint8_t *dst = vdst;
+ size_t i, n = simd_oprsz(desc);
+
+ for (i = 0; i < n; ++i) {
+ dst[i] = src[tile_vslice_index(i)];
+ }
+}
+
+void HELPER(sme2_mova_zc_h)(void *vdst, void *vsrc, uint32_t desc)
+{
+ const uint16_t *src = vsrc;
+ uint16_t *dst = vdst;
+ size_t i, n = simd_oprsz(desc) / 2;
+
+ for (i = 0; i < n; ++i) {
+ dst[i] = src[tile_vslice_index(i)];
+ }
+}
+
+void HELPER(sme2_mova_zc_s)(void *vdst, void *vsrc, uint32_t desc)
+{
+ const uint32_t *src = vsrc;
+ uint32_t *dst = vdst;
+ size_t i, n = simd_oprsz(desc) / 4;
+
+ for (i = 0; i < n; ++i) {
+ dst[i] = src[tile_vslice_index(i)];
+ }
+}
+
+void HELPER(sme2_mova_zc_d)(void *vdst, void *vsrc, uint32_t desc)
+{
+ const uint64_t *src = vsrc;
+ uint64_t *dst = vdst;
+ size_t i, n = simd_oprsz(desc) / 8;
+
+ for (i = 0; i < n; ++i) {
+ dst[i] = src[tile_vslice_index(i)];
+ }
+}
+
+void HELPER(sme2p1_movaz_zc_b)(void *vdst, void *vsrc, uint32_t desc)
+{
+ uint8_t *src = vsrc;
+ uint8_t *dst = vdst;
+ size_t i, n = simd_oprsz(desc);
+
+ for (i = 0; i < n; ++i) {
+ dst[i] = src[tile_vslice_index(i)];
+ src[tile_vslice_index(i)] = 0;
+ }
+}
+
+void HELPER(sme2p1_movaz_zc_h)(void *vdst, void *vsrc, uint32_t desc)
+{
+ uint16_t *src = vsrc;
+ uint16_t *dst = vdst;
+ size_t i, n = simd_oprsz(desc) / 2;
+
+ for (i = 0; i < n; ++i) {
+ dst[i] = src[tile_vslice_index(i)];
+ src[tile_vslice_index(i)] = 0;
+ }
+}
+
+void HELPER(sme2p1_movaz_zc_s)(void *vdst, void *vsrc, uint32_t desc)
+{
+ uint32_t *src = vsrc;
+ uint32_t *dst = vdst;
+ size_t i, n = simd_oprsz(desc) / 4;
+
+ for (i = 0; i < n; ++i) {
+ dst[i] = src[tile_vslice_index(i)];
+ src[tile_vslice_index(i)] = 0;
+ }
+}
+
+void HELPER(sme2p1_movaz_zc_d)(void *vdst, void *vsrc, uint32_t desc)
+{
+ uint64_t *src = vsrc;
+ uint64_t *dst = vdst;
+ size_t i, n = simd_oprsz(desc) / 8;
+
+ for (i = 0; i < n; ++i) {
+ dst[i] = src[tile_vslice_index(i)];
+ src[tile_vslice_index(i)] = 0;
+ }
+}
+
+void HELPER(sme2p1_movaz_zc_q)(void *vdst, void *vsrc, uint32_t desc)
+{
+ Int128 *src = vsrc;
+ Int128 *dst = vdst;
+ size_t i, n = simd_oprsz(desc) / 16;
+
+ for (i = 0; i < n; ++i) {
+ dst[i] = src[tile_vslice_index(i)];
+ memset(&src[tile_vslice_index(i)], 0, 16);
+ }
+}
+
/*
* Clear elements in a tile slice comprising len bytes.
*/
@@ -314,6 +425,26 @@ static void copy_vertical_q(void *vdst, const void *vsrc, size_t len)
}
}
+void HELPER(sme2_mova_cz_b)(void *vdst, void *vsrc, uint32_t desc)
+{
+ copy_vertical_b(vdst, vsrc, simd_oprsz(desc));
+}
+
+void HELPER(sme2_mova_cz_h)(void *vdst, void *vsrc, uint32_t desc)
+{
+ copy_vertical_h(vdst, vsrc, simd_oprsz(desc));
+}
+
+void HELPER(sme2_mova_cz_s)(void *vdst, void *vsrc, uint32_t desc)
+{
+ copy_vertical_s(vdst, vsrc, simd_oprsz(desc));
+}
+
+void HELPER(sme2_mova_cz_d)(void *vdst, void *vsrc, uint32_t desc)
+{
+ copy_vertical_d(vdst, vsrc, simd_oprsz(desc));
+}
+
/*
* Host and TLB primitives for vertical tile slice addressing.
*/
@@ -344,54 +475,22 @@ static inline void sme_##NAME##_v_tlb(CPUARMState *env, void *za, \
TLB(env, useronly_clean_ptr(addr), val, ra); \
}
-/*
- * The ARMVectorReg elements are stored in host-endian 64-bit units.
- * For 128-bit quantities, the sequence defined by the Elem[] pseudocode
- * corresponds to storing the two 64-bit pieces in little-endian order.
- */
-#define DO_LDQ(HNAME, VNAME, BE, HOST, TLB) \
-static inline void HNAME##_host(void *za, intptr_t off, void *host) \
-{ \
- uint64_t val0 = HOST(host), val1 = HOST(host + 8); \
- uint64_t *ptr = za + off; \
- ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \
-} \
+#define DO_LDQ(HNAME, VNAME) \
static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \
{ \
HNAME##_host(za, tile_vslice_offset(off), host); \
} \
-static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \
- target_ulong addr, uintptr_t ra) \
-{ \
- uint64_t val0 = TLB(env, useronly_clean_ptr(addr), ra); \
- uint64_t val1 = TLB(env, useronly_clean_ptr(addr + 8), ra); \
- uint64_t *ptr = za + off; \
- ptr[0] = BE ? val1 : val0, ptr[1] = BE ? val0 : val1; \
-} \
static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \
target_ulong addr, uintptr_t ra) \
{ \
HNAME##_tlb(env, za, tile_vslice_offset(off), addr, ra); \
}
-#define DO_STQ(HNAME, VNAME, BE, HOST, TLB) \
-static inline void HNAME##_host(void *za, intptr_t off, void *host) \
-{ \
- uint64_t *ptr = za + off; \
- HOST(host, ptr[BE]); \
- HOST(host + 8, ptr[!BE]); \
-} \
+#define DO_STQ(HNAME, VNAME) \
static inline void VNAME##_v_host(void *za, intptr_t off, void *host) \
{ \
HNAME##_host(za, tile_vslice_offset(off), host); \
} \
-static inline void HNAME##_tlb(CPUARMState *env, void *za, intptr_t off, \
- target_ulong addr, uintptr_t ra) \
-{ \
- uint64_t *ptr = za + off; \
- TLB(env, useronly_clean_ptr(addr), ptr[BE], ra); \
- TLB(env, useronly_clean_ptr(addr + 8), ptr[!BE], ra); \
-} \
static inline void VNAME##_v_tlb(CPUARMState *env, void *za, intptr_t off, \
target_ulong addr, uintptr_t ra) \
{ \
@@ -406,8 +505,8 @@ DO_LD(ld1s_le, uint32_t, ldl_le_p, cpu_ldl_le_data_ra)
DO_LD(ld1d_be, uint64_t, ldq_be_p, cpu_ldq_be_data_ra)
DO_LD(ld1d_le, uint64_t, ldq_le_p, cpu_ldq_le_data_ra)
-DO_LDQ(sve_ld1qq_be, sme_ld1q_be, 1, ldq_be_p, cpu_ldq_be_data_ra)
-DO_LDQ(sve_ld1qq_le, sme_ld1q_le, 0, ldq_le_p, cpu_ldq_le_data_ra)
+DO_LDQ(sve_ld1qq_be, sme_ld1q_be)
+DO_LDQ(sve_ld1qq_le, sme_ld1q_le)
DO_ST(st1b, uint8_t, stb_p, cpu_stb_data_ra)
DO_ST(st1h_be, uint16_t, stw_be_p, cpu_stw_be_data_ra)
@@ -417,8 +516,8 @@ DO_ST(st1s_le, uint32_t, stl_le_p, cpu_stl_le_data_ra)
DO_ST(st1d_be, uint64_t, stq_be_p, cpu_stq_be_data_ra)
DO_ST(st1d_le, uint64_t, stq_le_p, cpu_stq_le_data_ra)
-DO_STQ(sve_st1qq_be, sme_st1q_be, 1, stq_be_p, cpu_stq_be_data_ra)
-DO_STQ(sve_st1qq_le, sme_st1q_le, 0, stq_le_p, cpu_stq_le_data_ra)
+DO_STQ(sve_st1qq_be, sme_st1q_be)
+DO_STQ(sve_st1qq_le, sme_st1q_le)
#undef DO_LD
#undef DO_ST
@@ -903,28 +1002,69 @@ void HELPER(sme_addva_d)(void *vzda, void *vzn, void *vpn,
}
}
-void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn,
- void *vpm, float_status *fpst_in, uint32_t desc)
+static void do_fmopa_h(void *vza, void *vzn, void *vzm, uint16_t *pn,
+ uint16_t *pm, float_status *fpst, uint32_t desc,
+ uint16_t negx, int negf)
{
intptr_t row, col, oprsz = simd_maxsz(desc);
- uint32_t neg = simd_data(desc) << 31;
- uint16_t *pn = vpn, *pm = vpm;
- float_status fpst;
- /*
- * Make a copy of float_status because this operation does not
- * update the cumulative fp exception status. It also produces
- * default nans.
- */
- fpst = *fpst_in;
- set_default_nan_mode(true, &fpst);
+ for (row = 0; row < oprsz; ) {
+ uint16_t pa = pn[H2(row >> 4)];
+ do {
+ if (pa & 1) {
+ void *vza_row = vza + tile_vslice_offset(row);
+ uint16_t n = *(uint16_t *)(vzn + H1_2(row)) ^ negx;
+
+ for (col = 0; col < oprsz; ) {
+ uint16_t pb = pm[H2(col >> 4)];
+ do {
+ if (pb & 1) {
+ uint16_t *a = vza_row + H1_2(col);
+ uint16_t *m = vzm + H1_2(col);
+ *a = float16_muladd(n, *m, *a, negf, fpst);
+ }
+ col += 2;
+ pb >>= 2;
+ } while (col & 15);
+ }
+ }
+ row += 2;
+ pa >>= 2;
+ } while (row & 15);
+ }
+}
+
+void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, float_status *fpst, uint32_t desc)
+{
+ do_fmopa_h(vza, vzn, vzm, vpn, vpm, fpst, desc, 0, 0);
+}
+
+void HELPER(sme_fmops_h)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, float_status *fpst, uint32_t desc)
+{
+ do_fmopa_h(vza, vzn, vzm, vpn, vpm, fpst, desc, 1u << 15, 0);
+}
+
+void HELPER(sme_ah_fmops_h)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, float_status *fpst, uint32_t desc)
+{
+ do_fmopa_h(vza, vzn, vzm, vpn, vpm, fpst, desc, 0,
+ float_muladd_negate_product);
+}
+
+static void do_fmopa_s(void *vza, void *vzn, void *vzm, uint16_t *pn,
+ uint16_t *pm, float_status *fpst, uint32_t desc,
+ uint32_t negx, int negf)
+{
+ intptr_t row, col, oprsz = simd_maxsz(desc);
for (row = 0; row < oprsz; ) {
uint16_t pa = pn[H2(row >> 4)];
do {
if (pa & 1) {
void *vza_row = vza + tile_vslice_offset(row);
- uint32_t n = *(uint32_t *)(vzn + H1_4(row)) ^ neg;
+ uint32_t n = *(uint32_t *)(vzn + H1_4(row)) ^ negx;
for (col = 0; col < oprsz; ) {
uint16_t pb = pm[H2(col >> 4)];
@@ -932,7 +1072,7 @@ void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn,
if (pb & 1) {
uint32_t *a = vza_row + H1_4(col);
uint32_t *m = vzm + H1_4(col);
- *a = float32_muladd(n, *m, *a, 0, &fpst);
+ *a = float32_muladd(n, *m, *a, negf, fpst);
}
col += 4;
pb >>= 4;
@@ -945,32 +1085,116 @@ void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn,
}
}
-void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn,
- void *vpm, float_status *fpst_in, uint32_t desc)
+void HELPER(sme_fmopa_s)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, float_status *fpst, uint32_t desc)
{
- intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
- uint64_t neg = (uint64_t)simd_data(desc) << 63;
- uint64_t *za = vza, *zn = vzn, *zm = vzm;
- uint8_t *pn = vpn, *pm = vpm;
- float_status fpst = *fpst_in;
+ do_fmopa_s(vza, vzn, vzm, vpn, vpm, fpst, desc, 0, 0);
+}
- set_default_nan_mode(true, &fpst);
+void HELPER(sme_fmops_s)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, float_status *fpst, uint32_t desc)
+{
+ do_fmopa_s(vza, vzn, vzm, vpn, vpm, fpst, desc, 1u << 31, 0);
+}
+
+void HELPER(sme_ah_fmops_s)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, float_status *fpst, uint32_t desc)
+{
+ do_fmopa_s(vza, vzn, vzm, vpn, vpm, fpst, desc, 0,
+ float_muladd_negate_product);
+}
+
+static void do_fmopa_d(uint64_t *za, uint64_t *zn, uint64_t *zm, uint8_t *pn,
+ uint8_t *pm, float_status *fpst, uint32_t desc,
+ uint64_t negx, int negf)
+{
+ intptr_t row, col, oprsz = simd_oprsz(desc) / 8;
for (row = 0; row < oprsz; ++row) {
if (pn[H1(row)] & 1) {
uint64_t *za_row = &za[tile_vslice_index(row)];
- uint64_t n = zn[row] ^ neg;
+ uint64_t n = zn[row] ^ negx;
for (col = 0; col < oprsz; ++col) {
if (pm[H1(col)] & 1) {
uint64_t *a = &za_row[col];
- *a = float64_muladd(n, zm[col], *a, 0, &fpst);
+ *a = float64_muladd(n, zm[col], *a, negf, fpst);
}
}
}
}
}
+void HELPER(sme_fmopa_d)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, float_status *fpst, uint32_t desc)
+{
+ do_fmopa_d(vza, vzn, vzm, vpn, vpm, fpst, desc, 0, 0);
+}
+
+void HELPER(sme_fmops_d)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, float_status *fpst, uint32_t desc)
+{
+ do_fmopa_d(vza, vzn, vzm, vpn, vpm, fpst, desc, 1ull << 63, 0);
+}
+
+void HELPER(sme_ah_fmops_d)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, float_status *fpst, uint32_t desc)
+{
+ do_fmopa_d(vza, vzn, vzm, vpn, vpm, fpst, desc, 0,
+ float_muladd_negate_product);
+}
+
+static void do_bfmopa(void *vza, void *vzn, void *vzm, uint16_t *pn,
+ uint16_t *pm, float_status *fpst, uint32_t desc,
+ uint16_t negx, int negf)
+{
+ intptr_t row, col, oprsz = simd_maxsz(desc);
+
+ for (row = 0; row < oprsz; ) {
+ uint16_t pa = pn[H2(row >> 4)];
+ do {
+ if (pa & 1) {
+ void *vza_row = vza + tile_vslice_offset(row);
+ uint16_t n = *(uint16_t *)(vzn + H1_2(row)) ^ negx;
+
+ for (col = 0; col < oprsz; ) {
+ uint16_t pb = pm[H2(col >> 4)];
+ do {
+ if (pb & 1) {
+ uint16_t *a = vza_row + H1_2(col);
+ uint16_t *m = vzm + H1_2(col);
+ *a = bfloat16_muladd(n, *m, *a, negf, fpst);
+ }
+ col += 2;
+ pb >>= 2;
+ } while (col & 15);
+ }
+ }
+ row += 2;
+ pa >>= 2;
+ } while (row & 15);
+ }
+}
+
+void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, float_status *fpst, uint32_t desc)
+{
+ do_bfmopa(vza, vzn, vzm, vpn, vpm, fpst, desc, 0, 0);
+}
+
+void HELPER(sme_bfmops)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, float_status *fpst, uint32_t desc)
+{
+ do_bfmopa(vza, vzn, vzm, vpn, vpm, fpst, desc, 1u << 15, 0);
+}
+
+void HELPER(sme_ah_bfmops)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, float_status *fpst, uint32_t desc)
+{
+ do_bfmopa(vza, vzn, vzm, vpn, vpm, fpst, desc, 0,
+ float_muladd_negate_product);
+}
+
/*
* Alter PAIR as needed for controlling predicates being false,
* and for NEG on an enabled row element.
@@ -991,6 +1215,20 @@ static inline uint32_t f16mop_adj_pair(uint32_t pair, uint32_t pg, uint32_t neg)
return pair;
}
+static inline uint32_t f16mop_ah_neg_adj_pair(uint32_t pair, uint32_t pg)
+{
+ uint32_t l = pg & 1 ? float16_ah_chs(pair) : 0;
+ uint32_t h = pg & 4 ? float16_ah_chs(pair >> 16) : 0;
+ return l | (h << 16);
+}
+
+static inline uint32_t bf16mop_ah_neg_adj_pair(uint32_t pair, uint32_t pg)
+{
+ uint32_t l = pg & 1 ? bfloat16_ah_chs(pair) : 0;
+ uint32_t h = pg & 4 ? bfloat16_ah_chs(pair >> 16) : 0;
+ return l | (h << 16);
+}
+
static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2,
float_status *s_f16, float_status *s_std,
float_status *s_odd)
@@ -1005,49 +1243,67 @@ static float32 f16_dotadd(float32 sum, uint32_t e1, uint32_t e2,
* - we have pre-set-up copy of s_std which is set to round-to-odd,
* for the multiply (see below)
*/
- float64 e1r = float16_to_float64(e1 & 0xffff, true, s_f16);
- float64 e1c = float16_to_float64(e1 >> 16, true, s_f16);
- float64 e2r = float16_to_float64(e2 & 0xffff, true, s_f16);
- float64 e2c = float16_to_float64(e2 >> 16, true, s_f16);
- float64 t64;
+ float16 h1r = e1 & 0xffff;
+ float16 h1c = e1 >> 16;
+ float16 h2r = e2 & 0xffff;
+ float16 h2c = e2 >> 16;
float32 t32;
- /*
- * The ARM pseudocode function FPDot performs both multiplies
- * and the add with a single rounding operation. Emulate this
- * by performing the first multiply in round-to-odd, then doing
- * the second multiply as fused multiply-add, and rounding to
- * float32 all in one step.
- */
- t64 = float64_mul(e1r, e2r, s_odd);
- t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std);
+ /* C.f. FPProcessNaNs4 */
+ if (float16_is_any_nan(h1r) || float16_is_any_nan(h1c) ||
+ float16_is_any_nan(h2r) || float16_is_any_nan(h2c)) {
+ float16 t16;
+
+ if (float16_is_signaling_nan(h1r, s_f16)) {
+ t16 = h1r;
+ } else if (float16_is_signaling_nan(h1c, s_f16)) {
+ t16 = h1c;
+ } else if (float16_is_signaling_nan(h2r, s_f16)) {
+ t16 = h2r;
+ } else if (float16_is_signaling_nan(h2c, s_f16)) {
+ t16 = h2c;
+ } else if (float16_is_any_nan(h1r)) {
+ t16 = h1r;
+ } else if (float16_is_any_nan(h1c)) {
+ t16 = h1c;
+ } else if (float16_is_any_nan(h2r)) {
+ t16 = h2r;
+ } else {
+ t16 = h2c;
+ }
+ t32 = float16_to_float32(t16, true, s_f16);
+ } else {
+ float64 e1r = float16_to_float64(h1r, true, s_f16);
+ float64 e1c = float16_to_float64(h1c, true, s_f16);
+ float64 e2r = float16_to_float64(h2r, true, s_f16);
+ float64 e2c = float16_to_float64(h2c, true, s_f16);
+ float64 t64;
+
+ /*
+ * The ARM pseudocode function FPDot performs both multiplies
+ * and the add with a single rounding operation. Emulate this
+ * by performing the first multiply in round-to-odd, then doing
+ * the second multiply as fused multiply-add, and rounding to
+ * float32 all in one step.
+ */
+ t64 = float64_mul(e1r, e2r, s_odd);
+ t64 = float64r32_muladd(e1c, e2c, t64, 0, s_std);
- /* This conversion is exact, because we've already rounded. */
- t32 = float64_to_float32(t64, s_std);
+ /* This conversion is exact, because we've already rounded. */
+ t32 = float64_to_float32(t64, s_std);
+ }
/* The final accumulation step is not fused. */
return float32_add(sum, t32, s_std);
}
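
The round-to-odd trick above deserves a concrete picture: round-to-odd keeps a "sticky" record of any discarded bits, so a subsequent rounding cannot commit the classic double-rounding error. The integer analogue (a standalone sketch, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Drop SHIFT low bits; force the result's LSB on if any dropped bit
     * was set.  This mirrors what float_round_to_odd provides for the
     * first multiply in f16_dotadd, before the final round to float32. */
    static uint64_t round_to_odd(uint64_t x, unsigned shift)
    {
        uint64_t dropped = x & ((1ull << shift) - 1);
        return (x >> shift) | (dropped != 0);
    }

    int main(void)
    {
        assert(round_to_odd(0x980, 4) == 0x98); /* exact: unchanged */
        assert(round_to_odd(0x981, 4) == 0x99); /* inexact: LSB forced on */
        return 0;
    }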
-void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
- void *vpm, CPUARMState *env, uint32_t desc)
+static void do_fmopa_w_h(void *vza, void *vzn, void *vzm, uint16_t *pn,
+ uint16_t *pm, CPUARMState *env, uint32_t desc,
+ uint32_t negx, bool ah_neg)
{
intptr_t row, col, oprsz = simd_maxsz(desc);
- uint32_t neg = simd_data(desc) * 0x80008000u;
- uint16_t *pn = vpn, *pm = vpm;
- float_status fpst_odd, fpst_std, fpst_f16;
+ float_status fpst_odd = env->vfp.fp_status[FPST_ZA];
- /*
- * Make copies of the fp status fields we use, because this operation
- * does not update the cumulative fp exception status. It also
- * produces default NaNs. We also need a second copy of fp_status with
- * round-to-odd -- see above.
- */
- fpst_f16 = env->vfp.fp_status[FPST_A64_F16];
- fpst_std = env->vfp.fp_status[FPST_A64];
- set_default_nan_mode(true, &fpst_std);
- set_default_nan_mode(true, &fpst_f16);
- fpst_odd = fpst_std;
set_float_rounding_mode(float_round_to_odd, &fpst_odd);
for (row = 0; row < oprsz; ) {
@@ -1056,7 +1312,11 @@ void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
void *vza_row = vza + tile_vslice_offset(row);
uint32_t n = *(uint32_t *)(vzn + H1_4(row));
- n = f16mop_adj_pair(n, prow, neg);
+ if (ah_neg) {
+ n = f16mop_ah_neg_adj_pair(n, prow);
+ } else {
+ n = f16mop_adj_pair(n, prow, negx);
+ }
for (col = 0; col < oprsz; ) {
uint16_t pcol = pm[H2(col >> 4)];
@@ -1067,7 +1327,9 @@ void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
m = f16mop_adj_pair(m, pcol, 0);
*a = f16_dotadd(*a, n, m,
- &fpst_f16, &fpst_std, &fpst_odd);
+ &env->vfp.fp_status[FPST_ZA_F16],
+ &env->vfp.fp_status[FPST_ZA],
+ &fpst_odd);
}
col += 4;
pcol >>= 4;
@@ -1079,12 +1341,103 @@ void HELPER(sme_fmopa_h)(void *vza, void *vzn, void *vzm, void *vpn,
}
}
-void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm,
- void *vpn, void *vpm, CPUARMState *env, uint32_t desc)
+void HELPER(sme_fmopa_w_h)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, CPUARMState *env, uint32_t desc)
+{
+ do_fmopa_w_h(vza, vzn, vzm, vpn, vpm, env, desc, 0, false);
+}
+
+void HELPER(sme_fmops_w_h)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, CPUARMState *env, uint32_t desc)
+{
+ do_fmopa_w_h(vza, vzn, vzm, vpn, vpm, env, desc, 0x80008000u, false);
+}
+
+void HELPER(sme_ah_fmops_w_h)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, CPUARMState *env, uint32_t desc)
+{
+ do_fmopa_w_h(vza, vzn, vzm, vpn, vpm, env, desc, 0, true);
+}
+
+void HELPER(sme2_fdot_h)(void *vd, void *vn, void *vm, void *va,
+ CPUARMState *env, uint32_t desc)
+{
+ intptr_t i, oprsz = simd_maxsz(desc);
+ bool za = extract32(desc, SIMD_DATA_SHIFT, 1);
+ float_status *fpst_std = &env->vfp.fp_status[za ? FPST_ZA : FPST_A64];
+ float_status *fpst_f16 = &env->vfp.fp_status[za ? FPST_ZA_F16 : FPST_A64_F16];
+ float_status fpst_odd = *fpst_std;
+ float32 *d = vd, *a = va;
+ uint32_t *n = vn, *m = vm;
+
+ set_float_rounding_mode(float_round_to_odd, &fpst_odd);
+
+ for (i = 0; i < oprsz / sizeof(float32); ++i) {
+ d[H4(i)] = f16_dotadd(a[H4(i)], n[H4(i)], m[H4(i)],
+ fpst_f16, fpst_std, &fpst_odd);
+ }
+}
+
+void HELPER(sme2_fdot_idx_h)(void *vd, void *vn, void *vm, void *va,
+ CPUARMState *env, uint32_t desc)
+{
+ intptr_t i, j, oprsz = simd_maxsz(desc);
+ intptr_t elements = oprsz / sizeof(float32);
+ intptr_t eltspersegment = MIN(4, elements);
+ int idx = extract32(desc, SIMD_DATA_SHIFT, 2);
+ bool za = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ float_status *fpst_std = &env->vfp.fp_status[za ? FPST_ZA : FPST_A64];
+ float_status *fpst_f16 = &env->vfp.fp_status[za ? FPST_ZA_F16 : FPST_A64_F16];
+ float_status fpst_odd = *fpst_std;
+ float32 *d = vd, *a = va;
+ uint32_t *n = vn, *m = (uint32_t *)vm + H4(idx);
+
+ set_float_rounding_mode(float_round_to_odd, &fpst_odd);
+
+ for (i = 0; i < elements; i += eltspersegment) {
+ uint32_t mm = m[i];
+ for (j = 0; j < eltspersegment; ++j) {
+ d[H4(i + j)] = f16_dotadd(a[H4(i + j)], n[H4(i + j)], mm,
+ fpst_f16, fpst_std, &fpst_odd);
+ }
+ }
+}
+
+void HELPER(sme2_fvdot_idx_h)(void *vd, void *vn, void *vm, void *va,
+ CPUARMState *env, uint32_t desc)
+{
+ intptr_t i, j, oprsz = simd_maxsz(desc);
+ intptr_t elements = oprsz / sizeof(float32);
+ intptr_t eltspersegment = MIN(4, elements);
+ int idx = extract32(desc, SIMD_DATA_SHIFT, 2);
+ int sel = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ float_status fpst_odd, *fpst_std, *fpst_f16;
+ float32 *d = vd, *a = va;
+ uint16_t *n0 = vn;
+ uint16_t *n1 = vn + sizeof(ARMVectorReg);
+ uint32_t *m = (uint32_t *)vm + H4(idx);
+
+ fpst_std = &env->vfp.fp_status[FPST_ZA];
+ fpst_f16 = &env->vfp.fp_status[FPST_ZA_F16];
+ fpst_odd = *fpst_std;
+ set_float_rounding_mode(float_round_to_odd, &fpst_odd);
+
+ for (i = 0; i < elements; i += eltspersegment) {
+ uint32_t mm = m[i];
+ for (j = 0; j < eltspersegment; ++j) {
+ uint32_t nn = (n0[H2(2 * (i + j) + sel)])
+ | (n1[H2(2 * (i + j) + sel)] << 16);
+ d[i + H4(j)] = f16_dotadd(a[i + H4(j)], nn, mm,
+ fpst_f16, fpst_std, &fpst_odd);
+ }
+ }
+}
+
+static void do_bfmopa_w(void *vza, void *vzn, void *vzm,
+ uint16_t *pn, uint16_t *pm, CPUARMState *env,
+ uint32_t desc, uint32_t negx, bool ah_neg)
{
intptr_t row, col, oprsz = simd_maxsz(desc);
- uint32_t neg = simd_data(desc) * 0x80008000u;
- uint16_t *pn = vpn, *pm = vpm;
float_status fpst, fpst_odd;
if (is_ebf(env, &fpst, &fpst_odd)) {
@@ -1094,7 +1447,11 @@ void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm,
void *vza_row = vza + tile_vslice_offset(row);
uint32_t n = *(uint32_t *)(vzn + H1_4(row));
- n = f16mop_adj_pair(n, prow, neg);
+ if (ah_neg) {
+ n = bf16mop_ah_neg_adj_pair(n, prow);
+ } else {
+ n = f16mop_adj_pair(n, prow, negx);
+ }
for (col = 0; col < oprsz; ) {
uint16_t pcol = pm[H2(col >> 4)];
@@ -1121,7 +1478,11 @@ void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm,
void *vza_row = vza + tile_vslice_offset(row);
uint32_t n = *(uint32_t *)(vzn + H1_4(row));
- n = f16mop_adj_pair(n, prow, neg);
+ if (ah_neg) {
+ n = bf16mop_ah_neg_adj_pair(n, prow);
+ } else {
+ n = f16mop_adj_pair(n, prow, negx);
+ }
for (col = 0; col < oprsz; ) {
uint16_t pcol = pm[H2(col >> 4)];
@@ -1144,6 +1505,24 @@ void HELPER(sme_bfmopa)(void *vza, void *vzn, void *vzm,
}
}
+void HELPER(sme_bfmopa_w)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, CPUARMState *env, uint32_t desc)
+{
+ do_bfmopa_w(vza, vzn, vzm, vpn, vpm, env, desc, 0, false);
+}
+
+void HELPER(sme_bfmops_w)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, CPUARMState *env, uint32_t desc)
+{
+ do_bfmopa_w(vza, vzn, vzm, vpn, vpm, env, desc, 0x80008000u, false);
+}
+
+void HELPER(sme_ah_bfmops_w)(void *vza, void *vzn, void *vzm, void *vpn,
+ void *vpm, CPUARMState *env, uint32_t desc)
+{
+ do_bfmopa_w(vza, vzn, vzm, vpn, vpm, env, desc, 0, true);
+}
+
typedef uint32_t IMOPFn32(uint32_t, uint32_t, uint32_t, uint8_t, bool);
static inline void do_imopa_s(uint32_t *za, uint32_t *zn, uint32_t *zm,
uint8_t *pn, uint8_t *pm,
@@ -1188,7 +1567,7 @@ static inline void do_imopa_d(uint64_t *za, uint64_t *zn, uint64_t *zm,
}
}
-#define DEF_IMOP_32(NAME, NTYPE, MTYPE) \
+#define DEF_IMOP_8x4_32(NAME, NTYPE, MTYPE) \
static uint32_t NAME(uint32_t n, uint32_t m, uint32_t a, uint8_t p, bool neg) \
{ \
uint32_t sum = 0; \
@@ -1201,7 +1580,7 @@ static uint32_t NAME(uint32_t n, uint32_t m, uint32_t a, uint8_t p, bool neg) \
return neg ? a - sum : a + sum; \
}
-#define DEF_IMOP_64(NAME, NTYPE, MTYPE) \
+#define DEF_IMOP_16x4_64(NAME, NTYPE, MTYPE) \
static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \
{ \
uint64_t sum = 0; \
@@ -1214,27 +1593,1070 @@ static uint64_t NAME(uint64_t n, uint64_t m, uint64_t a, uint8_t p, bool neg) \
return neg ? a - sum : a + sum; \
}
-DEF_IMOP_32(smopa_s, int8_t, int8_t)
-DEF_IMOP_32(umopa_s, uint8_t, uint8_t)
-DEF_IMOP_32(sumopa_s, int8_t, uint8_t)
-DEF_IMOP_32(usmopa_s, uint8_t, int8_t)
+DEF_IMOP_8x4_32(smopa_s, int8_t, int8_t)
+DEF_IMOP_8x4_32(umopa_s, uint8_t, uint8_t)
+DEF_IMOP_8x4_32(sumopa_s, int8_t, uint8_t)
+DEF_IMOP_8x4_32(usmopa_s, uint8_t, int8_t)
-DEF_IMOP_64(smopa_d, int16_t, int16_t)
-DEF_IMOP_64(umopa_d, uint16_t, uint16_t)
-DEF_IMOP_64(sumopa_d, int16_t, uint16_t)
-DEF_IMOP_64(usmopa_d, uint16_t, int16_t)
+DEF_IMOP_16x4_64(smopa_d, int16_t, int16_t)
+DEF_IMOP_16x4_64(umopa_d, uint16_t, uint16_t)
+DEF_IMOP_16x4_64(sumopa_d, int16_t, uint16_t)
+DEF_IMOP_16x4_64(usmopa_d, uint16_t, int16_t)
-#define DEF_IMOPH(NAME, S) \
- void HELPER(sme_##NAME##_##S)(void *vza, void *vzn, void *vzm, \
+#define DEF_IMOPH(P, NAME, S) \
+ void HELPER(P##_##NAME##_##S)(void *vza, void *vzn, void *vzm, \
void *vpn, void *vpm, uint32_t desc) \
{ do_imopa_##S(vza, vzn, vzm, vpn, vpm, desc, NAME##_##S); }
-DEF_IMOPH(smopa, s)
-DEF_IMOPH(umopa, s)
-DEF_IMOPH(sumopa, s)
-DEF_IMOPH(usmopa, s)
+DEF_IMOPH(sme, smopa, s)
+DEF_IMOPH(sme, umopa, s)
+DEF_IMOPH(sme, sumopa, s)
+DEF_IMOPH(sme, usmopa, s)
+
+DEF_IMOPH(sme, smopa, d)
+DEF_IMOPH(sme, umopa, d)
+DEF_IMOPH(sme, sumopa, d)
+DEF_IMOPH(sme, usmopa, d)
+
+static uint32_t bmopa_s(uint32_t n, uint32_t m, uint32_t a, uint8_t p, bool neg)
+{
+ uint32_t sum = ctpop32(~(n ^ m));
+ if (neg) {
+ sum = -sum;
+ }
+ if (!(p & 1)) {
+ sum = 0;
+ }
+ return a + sum;
+}
+
+DEF_IMOPH(sme2, bmopa, s)
+
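bmopa_s() counts the bit positions where n and m agree: ~(n ^ m) is 1 exactly where the two operands match, and ctpop32() counts those bits. A quick sanity check with __builtin_popcount standing in for ctpop32 (illustration only, not part of the patch):

    #include <assert.h>

    int main(void)
    {
        unsigned n = 0xff00ff00u, m = 0xff00000fu;
        /* Top 16 bits agree (16), plus bits 7:4 of the low half (4). */
        assert(__builtin_popcount(~(n ^ m)) == 20);
        return 0;
    }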
+#define DEF_IMOP_16x2_32(NAME, NTYPE, MTYPE) \
+static uint32_t NAME(uint32_t n, uint32_t m, uint32_t a, uint8_t p, bool neg) \
+{ \
+ uint32_t sum = 0; \
+ /* Apply P to N as a mask, making the inactive elements 0. */ \
+ n &= expand_pred_h(p); \
+ sum += (NTYPE)(n >> 0) * (MTYPE)(m >> 0); \
+ sum += (NTYPE)(n >> 16) * (MTYPE)(m >> 16); \
+ return neg ? a - sum : a + sum; \
+}
+
+DEF_IMOP_16x2_32(smopa2_s, int16_t, int16_t)
+DEF_IMOP_16x2_32(umopa2_s, uint16_t, uint16_t)
+
+DEF_IMOPH(sme2, smopa2, s)
+DEF_IMOPH(sme2, umopa2, s)
+
+#define DO_VDOT_IDX(NAME, TYPED, TYPEN, TYPEM, HD, HN) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ intptr_t svl = simd_oprsz(desc); \
+ intptr_t elements = svl / sizeof(TYPED); \
+ intptr_t eltperseg = 16 / sizeof(TYPED); \
+ intptr_t nreg = sizeof(TYPED) / sizeof(TYPEN); \
+ intptr_t vstride = (svl / nreg) * sizeof(ARMVectorReg); \
+ intptr_t zstride = sizeof(ARMVectorReg) / sizeof(TYPEN); \
+ intptr_t idx = extract32(desc, SIMD_DATA_SHIFT, 2); \
+ TYPEN *n = vn; \
+ TYPEM *m = vm; \
+ for (intptr_t r = 0; r < nreg; r++) { \
+ TYPED *d = vd + r * vstride; \
+ for (intptr_t seg = 0; seg < elements; seg += eltperseg) { \
+ intptr_t s = seg + idx; \
+ for (intptr_t e = seg; e < seg + eltperseg; e++) { \
+ TYPED sum = d[HD(e)]; \
+ for (intptr_t i = 0; i < nreg; i++) { \
+ TYPED nn = n[i * zstride + HN(nreg * e + r)]; \
+ TYPED mm = m[HN(nreg * s + i)]; \
+ sum += nn * mm; \
+ } \
+ d[HD(e)] = sum; \
+ } \
+ } \
+ } \
+}
+
+DO_VDOT_IDX(sme2_svdot_idx_4b, int32_t, int8_t, int8_t, H4, H1)
+DO_VDOT_IDX(sme2_uvdot_idx_4b, uint32_t, uint8_t, uint8_t, H4, H1)
+DO_VDOT_IDX(sme2_suvdot_idx_4b, int32_t, int8_t, uint8_t, H4, H1)
+DO_VDOT_IDX(sme2_usvdot_idx_4b, int32_t, uint8_t, int8_t, H4, H1)
+
+DO_VDOT_IDX(sme2_svdot_idx_4h, int64_t, int16_t, int16_t, H8, H2)
+DO_VDOT_IDX(sme2_uvdot_idx_4h, uint64_t, uint16_t, uint16_t, H8, H2)
+
+DO_VDOT_IDX(sme2_svdot_idx_2h, int32_t, int16_t, int16_t, H4, H2)
+DO_VDOT_IDX(sme2_uvdot_idx_2h, uint32_t, uint16_t, uint16_t, H4, H2)
+
+#undef DO_VDOT_IDX
+
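Stripped of the ZA row striding, the inner operation of each *VDOT helper is a four-way widening dot product folded into one wider accumulator. The 8-bit signed lane in scalar form (a sketch, not part of the patch):

    #include <stdint.h>

    static int32_t svdot_lane(int32_t acc, const int8_t n[4], const int8_t m[4])
    {
        for (int i = 0; i < 4; i++) {
            acc += (int32_t)n[i] * m[i];  /* widen before multiplying */
        }
        return acc;
    }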
+#define DO_MLALL(NAME, TYPEW, TYPEN, TYPEM, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t elements = simd_oprsz(desc) / sizeof(TYPEW); \
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 2); \
+ TYPEW *d = vd, *a = va; TYPEN *n = vn; TYPEM *m = vm; \
+ for (intptr_t i = 0; i < elements; ++i) { \
+ TYPEW nn = n[HN(i * 4 + sel)]; \
+ TYPEM mm = m[HN(i * 4 + sel)]; \
+ d[HW(i)] = a[HW(i)] OP (nn * mm); \
+ } \
+}
+
+DO_MLALL(sme2_smlall_s, int32_t, int8_t, int8_t, H4, H1, +)
+DO_MLALL(sme2_smlall_d, int64_t, int16_t, int16_t, H8, H2, +)
+DO_MLALL(sme2_smlsll_s, int32_t, int8_t, int8_t, H4, H1, -)
+DO_MLALL(sme2_smlsll_d, int64_t, int16_t, int16_t, H8, H2, -)
+
+DO_MLALL(sme2_umlall_s, uint32_t, uint8_t, uint8_t, H4, H1, +)
+DO_MLALL(sme2_umlall_d, uint64_t, uint16_t, uint16_t, H8, H2, +)
+DO_MLALL(sme2_umlsll_s, uint32_t, uint8_t, uint8_t, H4, H1, -)
+DO_MLALL(sme2_umlsll_d, uint64_t, uint16_t, uint16_t, H8, H2, -)
+
+DO_MLALL(sme2_usmlall_s, uint32_t, uint8_t, int8_t, H4, H1, +)
+
+#undef DO_MLALL
+
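In DO_MLALL the 2-bit "sel" picks one narrow element out of every group of four, widens it, and accumulates the product into the corresponding wide element. One element of the signed 8-to-32-bit case (a sketch, not part of the patch):

    #include <stdint.h>

    static int32_t smlall_elt(int32_t a, const int8_t *n, const int8_t *m,
                              intptr_t i, intptr_t sel)
    {
        return a + (int32_t)n[i * 4 + sel] * (int32_t)m[i * 4 + sel];
    }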
+#define DO_MLALL_IDX(NAME, TYPEW, TYPEN, TYPEM, HW, HN, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t elements = simd_oprsz(desc) / sizeof(TYPEW); \
+ intptr_t eltspersegment = 16 / sizeof(TYPEW); \
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 2); \
+ intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 4); \
+ TYPEW *d = vd, *a = va; TYPEN *n = vn; TYPEM *m = vm; \
+ for (intptr_t i = 0; i < elements; i += eltspersegment) { \
+ TYPEW mm = m[HN(i * 4 + idx)]; \
+ for (intptr_t j = 0; j < eltspersegment; ++j) { \
+ TYPEN nn = n[HN((i + j) * 4 + sel)]; \
+ d[HW(i + j)] = a[HW(i + j)] OP (nn * mm); \
+ } \
+ } \
+}
+
+DO_MLALL_IDX(sme2_smlall_idx_s, int32_t, int8_t, int8_t, H4, H1, +)
+DO_MLALL_IDX(sme2_smlall_idx_d, int64_t, int16_t, int16_t, H8, H2, +)
+DO_MLALL_IDX(sme2_smlsll_idx_s, int32_t, int8_t, int8_t, H4, H1, -)
+DO_MLALL_IDX(sme2_smlsll_idx_d, int64_t, int16_t, int16_t, H8, H2, -)
+
+DO_MLALL_IDX(sme2_umlall_idx_s, uint32_t, uint8_t, uint8_t, H4, H1, +)
+DO_MLALL_IDX(sme2_umlall_idx_d, uint64_t, uint16_t, uint16_t, H8, H2, +)
+DO_MLALL_IDX(sme2_umlsll_idx_s, uint32_t, uint8_t, uint8_t, H4, H1, -)
+DO_MLALL_IDX(sme2_umlsll_idx_d, uint64_t, uint16_t, uint16_t, H8, H2, -)
+
+DO_MLALL_IDX(sme2_usmlall_idx_s, uint32_t, uint8_t, int8_t, H4, H1, +)
+DO_MLALL_IDX(sme2_sumlall_idx_s, uint32_t, int8_t, uint8_t, H4, H1, +)
+
+#undef DO_MLALL_IDX
+
+/* Convert and compress */
+void HELPER(sme2_bfcvt)(void *vd, void *vs, float_status *fpst, uint32_t desc)
+{
+ ARMVectorReg scratch;
+ size_t oprsz = simd_oprsz(desc);
+ size_t i, n = oprsz / 4;
+ float32 *s0 = vs;
+ float32 *s1 = vs + sizeof(ARMVectorReg);
+ bfloat16 *d = vd;
+
+ if (vd == s1) {
+ s1 = memcpy(&scratch, s1, oprsz);
+ }
+
+ for (i = 0; i < n; ++i) {
+ d[H2(i)] = float32_to_bfloat16(s0[H4(i)], fpst);
+ }
+ for (i = 0; i < n; ++i) {
+ d[H2(i) + n] = float32_to_bfloat16(s1[H4(i)], fpst);
+ }
+}
+
+void HELPER(sme2_fcvt_n)(void *vd, void *vs, float_status *fpst, uint32_t desc)
+{
+ ARMVectorReg scratch;
+ size_t oprsz = simd_oprsz(desc);
+ size_t i, n = oprsz / 4;
+ float32 *s0 = vs;
+ float32 *s1 = vs + sizeof(ARMVectorReg);
+ float16 *d = vd;
+
+ if (vd == s1) {
+ s1 = memcpy(&scratch, s1, oprsz);
+ }
+
+ for (i = 0; i < n; ++i) {
+ d[H2(i)] = sve_f32_to_f16(s0[H4(i)], fpst);
+ }
+ for (i = 0; i < n; ++i) {
+ d[H2(i) + n] = sve_f32_to_f16(s1[H4(i)], fpst);
+ }
+}
+
+#define SQCVT2(NAME, TW, TN, HW, HN, SAT) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch; \
+ size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+ TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \
+ TN *d = vd; \
+ if (vectors_overlap(vd, 1, vs, 2)) { \
+ d = (TN *)&scratch; \
+ } \
+ for (size_t i = 0; i < n; ++i) { \
+ d[HN(i)] = SAT(s0[HW(i)]); \
+ d[HN(i + n)] = SAT(s1[HW(i)]); \
+ } \
+ if (d != vd) { \
+ memcpy(vd, d, oprsz); \
+ } \
+}
+
+SQCVT2(sme2_sqcvt_sh, int32_t, int16_t, H4, H2, do_ssat_h)
+SQCVT2(sme2_uqcvt_sh, uint32_t, uint16_t, H4, H2, do_usat_h)
+SQCVT2(sme2_sqcvtu_sh, int32_t, uint16_t, H4, H2, do_usat_h)
+
+#undef SQCVT2
+
+#define SQCVT4(NAME, TW, TN, HW, HN, SAT) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch; \
+ size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+ TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \
+ TW *s2 = vs + 2 * sizeof(ARMVectorReg); \
+ TW *s3 = vs + 3 * sizeof(ARMVectorReg); \
+ TN *d = vd; \
+ if (vectors_overlap(vd, 1, vs, 4)) { \
+ d = (TN *)&scratch; \
+ } \
+ for (size_t i = 0; i < n; ++i) { \
+ d[HN(i)] = SAT(s0[HW(i)]); \
+ d[HN(i + n)] = SAT(s1[HW(i)]); \
+ d[HN(i + 2 * n)] = SAT(s2[HW(i)]); \
+ d[HN(i + 3 * n)] = SAT(s3[HW(i)]); \
+ } \
+ if (d != vd) { \
+ memcpy(vd, d, oprsz); \
+ } \
+}
+
+SQCVT4(sme2_sqcvt_sb, int32_t, int8_t, H4, H2, do_ssat_b)
+SQCVT4(sme2_uqcvt_sb, uint32_t, uint8_t, H4, H2, do_usat_b)
+SQCVT4(sme2_sqcvtu_sb, int32_t, uint8_t, H4, H2, do_usat_b)
+
+SQCVT4(sme2_sqcvt_dh, int64_t, int16_t, H8, H2, do_ssat_h)
+SQCVT4(sme2_uqcvt_dh, uint64_t, uint16_t, H8, H2, do_usat_h)
+SQCVT4(sme2_sqcvtu_dh, int64_t, uint16_t, H8, H2, do_usat_h)
+
+#undef SQCVT4
+
+#define SQRSHR2(NAME, TW, TN, HW, HN, RSHR, SAT) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch; \
+ size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+ int shift = simd_data(desc); \
+ TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \
+ TN *d = vd; \
+ if (vectors_overlap(vd, 1, vs, 2)) { \
+ d = (TN *)&scratch; \
+ } \
+ for (size_t i = 0; i < n; ++i) { \
+ d[HN(i)] = SAT(RSHR(s0[HW(i)], shift)); \
+ d[HN(i + n)] = SAT(RSHR(s1[HW(i)], shift)); \
+ } \
+ if (d != vd) { \
+ memcpy(vd, d, oprsz); \
+ } \
+}
+
+SQRSHR2(sme2_sqrshr_sh, int32_t, int16_t, H4, H2, do_srshr, do_ssat_h)
+SQRSHR2(sme2_uqrshr_sh, uint32_t, uint16_t, H4, H2, do_urshr, do_usat_h)
+SQRSHR2(sme2_sqrshru_sh, int32_t, uint16_t, H4, H2, do_srshr, do_usat_h)
+
+#undef SQRSHR2
+
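The RSHR step is a rounding right shift -- the last bit shifted out is added back, so halfway cases round up -- followed by saturation to the narrow type. One element of the SQRSHR_sh pipeline, a sketch of what the do_srshr and do_ssat_h helpers referenced above compute (assumes 1 <= sh <= 16, as the decode guarantees; not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    static int16_t sqrshr_sh_elt(int32_t x, unsigned sh)
    {
        int64_t r = ((int64_t)x >> sh) + ((x >> (sh - 1)) & 1);
        if (r > INT16_MAX) {
            r = INT16_MAX;
        } else if (r < INT16_MIN) {
            r = INT16_MIN;
        }
        return r;
    }

    int main(void)
    {
        assert(sqrshr_sh_elt(3, 1) == 2);                  /* 1.5 rounds up */
        assert(sqrshr_sh_elt(INT32_MAX, 8) == INT16_MAX);  /* saturates */
        return 0;
    }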
+#define SQRSHR4(NAME, TW, TN, HW, HN, RSHR, SAT) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch; \
+ size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+ int shift = simd_data(desc); \
+ TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \
+ TW *s2 = vs + 2 * sizeof(ARMVectorReg); \
+ TW *s3 = vs + 3 * sizeof(ARMVectorReg); \
+ TN *d = vd; \
+ if (vectors_overlap(vd, 1, vs, 4)) { \
+ d = (TN *)&scratch; \
+ } \
+ for (size_t i = 0; i < n; ++i) { \
+ d[HN(i)] = SAT(RSHR(s0[HW(i)], shift)); \
+ d[HN(i + n)] = SAT(RSHR(s1[HW(i)], shift)); \
+ d[HN(i + 2 * n)] = SAT(RSHR(s2[HW(i)], shift)); \
+ d[HN(i + 3 * n)] = SAT(RSHR(s3[HW(i)], shift)); \
+ } \
+ if (d != vd) { \
+ memcpy(vd, d, oprsz); \
+ } \
+}
+
+SQRSHR4(sme2_sqrshr_sb, int32_t, int8_t, H4, H2, do_srshr, do_ssat_b)
+SQRSHR4(sme2_uqrshr_sb, uint32_t, uint8_t, H4, H2, do_urshr, do_usat_b)
+SQRSHR4(sme2_sqrshru_sb, int32_t, uint8_t, H4, H2, do_srshr, do_usat_b)
+
+SQRSHR4(sme2_sqrshr_dh, int64_t, int16_t, H8, H2, do_srshr, do_ssat_h)
+SQRSHR4(sme2_uqrshr_dh, uint64_t, uint16_t, H8, H2, do_urshr, do_usat_h)
+SQRSHR4(sme2_sqrshru_dh, int64_t, uint16_t, H8, H2, do_srshr, do_usat_h)
+
+#undef SQRSHR4
+
+/* Convert and interleave */
+void HELPER(sme2_bfcvtn)(void *vd, void *vs, float_status *fpst, uint32_t desc)
+{
+ size_t i, n = simd_oprsz(desc) / 4;
+ float32 *s0 = vs;
+ float32 *s1 = vs + sizeof(ARMVectorReg);
+ bfloat16 *d = vd;
+
+ for (i = 0; i < n; ++i) {
+ bfloat16 d0 = float32_to_bfloat16(s0[H4(i)], fpst);
+ bfloat16 d1 = float32_to_bfloat16(s1[H4(i)], fpst);
+ d[H2(i * 2 + 0)] = d0;
+ d[H2(i * 2 + 1)] = d1;
+ }
+}
+
+void HELPER(sme2_fcvtn)(void *vd, void *vs, float_status *fpst, uint32_t desc)
+{
+ size_t i, n = simd_oprsz(desc) / 4;
+ float32 *s0 = vs;
+ float32 *s1 = vs + sizeof(ARMVectorReg);
+ float16 *d = vd;
+
+ for (i = 0; i < n; ++i) {
+ float16 d0 = sve_f32_to_f16(s0[H4(i)], fpst);
+ float16 d1 = sve_f32_to_f16(s1[H4(i)], fpst);
+ d[H2(i * 2 + 0)] = d0;
+ d[H2(i * 2 + 1)] = d1;
+ }
+}
+
+#define SQCVTN2(NAME, TW, TN, HW, HN, SAT) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch; \
+ size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+ TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \
+ TN *d = vd; \
+ if (vectors_overlap(vd, 1, vs, 2)) { \
+ d = (TN *)&scratch; \
+ } \
+ for (size_t i = 0; i < n; ++i) { \
+ d[HN(2 * i + 0)] = SAT(s0[HW(i)]); \
+ d[HN(2 * i + 1)] = SAT(s1[HW(i)]); \
+ } \
+ if (d != vd) { \
+ memcpy(vd, d, oprsz); \
+ } \
+}
+
+SQCVTN2(sme2_sqcvtn_sh, int32_t, int16_t, H4, H2, do_ssat_h)
+SQCVTN2(sme2_uqcvtn_sh, uint32_t, uint16_t, H4, H2, do_usat_h)
+SQCVTN2(sme2_sqcvtun_sh, int32_t, uint16_t, H4, H2, do_usat_h)
+
+#undef SQCVTN2
+
+#define SQCVTN4(NAME, TW, TN, HW, HN, SAT) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch; \
+ size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+ TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \
+ TW *s2 = vs + 2 * sizeof(ARMVectorReg); \
+ TW *s3 = vs + 3 * sizeof(ARMVectorReg); \
+ TN *d = vd; \
+ if (vectors_overlap(vd, 1, vs, 4)) { \
+ d = (TN *)&scratch; \
+ } \
+ for (size_t i = 0; i < n; ++i) { \
+ d[HN(4 * i + 0)] = SAT(s0[HW(i)]); \
+ d[HN(4 * i + 1)] = SAT(s1[HW(i)]); \
+ d[HN(4 * i + 2)] = SAT(s2[HW(i)]); \
+ d[HN(4 * i + 3)] = SAT(s3[HW(i)]); \
+ } \
+ if (d != vd) { \
+ memcpy(vd, d, oprsz); \
+ } \
+}
+
+SQCVTN4(sme2_sqcvtn_sb, int32_t, int8_t, H4, H1, do_ssat_b)
+SQCVTN4(sme2_uqcvtn_sb, uint32_t, uint8_t, H4, H1, do_usat_b)
+SQCVTN4(sme2_sqcvtun_sb, int32_t, uint8_t, H4, H1, do_usat_b)
+
+SQCVTN4(sme2_sqcvtn_dh, int64_t, int16_t, H8, H2, do_ssat_h)
+SQCVTN4(sme2_uqcvtn_dh, uint64_t, uint16_t, H8, H2, do_usat_h)
+SQCVTN4(sme2_sqcvtun_dh, int64_t, uint16_t, H8, H2, do_usat_h)
+
+#undef SQCVTN4
+
+#define SQRSHRN2(NAME, TW, TN, HW, HN, RSHR, SAT) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch; \
+ size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+ int shift = simd_data(desc); \
+ TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \
+ TN *d = vd; \
+ if (vectors_overlap(vd, 1, vs, 2)) { \
+ d = (TN *)&scratch; \
+ } \
+ for (size_t i = 0; i < n; ++i) { \
+ d[HN(2 * i + 0)] = SAT(RSHR(s0[HW(i)], shift)); \
+ d[HN(2 * i + 1)] = SAT(RSHR(s1[HW(i)], shift)); \
+ } \
+ if (d != vd) { \
+ memcpy(vd, d, oprsz); \
+ } \
+}
+
+SQRSHRN2(sme2_sqrshrn_sh, int32_t, int16_t, H4, H2, do_srshr, do_ssat_h)
+SQRSHRN2(sme2_uqrshrn_sh, uint32_t, uint16_t, H4, H2, do_urshr, do_usat_h)
+SQRSHRN2(sme2_sqrshrun_sh, int32_t, uint16_t, H4, H2, do_srshr, do_usat_h)
+
+#undef SQRSHRN2
+
+#define SQRSHRN4(NAME, TW, TN, HW, HN, RSHR, SAT) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch; \
+ size_t oprsz = simd_oprsz(desc), n = oprsz / sizeof(TW); \
+ int shift = simd_data(desc); \
+ TW *s0 = vs, *s1 = vs + sizeof(ARMVectorReg); \
+ TW *s2 = vs + 2 * sizeof(ARMVectorReg); \
+ TW *s3 = vs + 3 * sizeof(ARMVectorReg); \
+ TN *d = vd; \
+ if (vectors_overlap(vd, 1, vs, 4)) { \
+ d = (TN *)&scratch; \
+ } \
+ for (size_t i = 0; i < n; ++i) { \
+ d[HN(4 * i + 0)] = SAT(RSHR(s0[HW(i)], shift)); \
+ d[HN(4 * i + 1)] = SAT(RSHR(s1[HW(i)], shift)); \
+ d[HN(4 * i + 2)] = SAT(RSHR(s2[HW(i)], shift)); \
+ d[HN(4 * i + 3)] = SAT(RSHR(s3[HW(i)], shift)); \
+ } \
+ if (d != vd) { \
+ memcpy(vd, d, oprsz); \
+ } \
+}
+
+SQRSHRN4(sme2_sqrshrn_sb, int32_t, int8_t, H4, H1, do_srshr, do_ssat_b)
+SQRSHRN4(sme2_uqrshrn_sb, uint32_t, uint8_t, H4, H1, do_urshr, do_usat_b)
+SQRSHRN4(sme2_sqrshrun_sb, int32_t, uint8_t, H4, H1, do_srshr, do_usat_b)
+
+SQRSHRN4(sme2_sqrshrn_dh, int64_t, int16_t, H8, H2, do_srshr, do_ssat_h)
+SQRSHRN4(sme2_uqrshrn_dh, uint64_t, uint16_t, H8, H2, do_urshr, do_usat_h)
+SQRSHRN4(sme2_sqrshrun_dh, int64_t, uint16_t, H8, H2, do_srshr, do_usat_h)
+
+#undef SQRSHRN4
+
+/* Expand and convert */
+void HELPER(sme2_fcvt_w)(void *vd, void *vs, float_status *fpst, uint32_t desc)
+{
+ ARMVectorReg scratch;
+ size_t oprsz = simd_oprsz(desc);
+ size_t i, n = oprsz / 4;
+ float16 *s = vs;
+ float32 *d0 = vd;
+ float32 *d1 = vd + sizeof(ARMVectorReg);
+
+ if (vectors_overlap(vd, 1, vs, 2)) {
+ s = memcpy(&scratch, s, oprsz);
+ }
+
+ for (i = 0; i < n; ++i) {
+ d0[H4(i)] = sve_f16_to_f32(s[H2(i)], fpst);
+ }
+ for (i = 0; i < n; ++i) {
+ d1[H4(i)] = sve_f16_to_f32(s[H2(n + i)], fpst);
+ }
+}
+
+#define UNPK(NAME, SREG, TW, TN, HW, HN) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch[SREG]; \
+ size_t oprsz = simd_oprsz(desc); \
+ size_t n = oprsz / sizeof(TW); \
+ if (vectors_overlap(vd, 2 * SREG, vs, SREG)) { \
+ vs = memcpy(scratch, vs, sizeof(scratch)); \
+ } \
+ for (size_t r = 0; r < SREG; ++r) { \
+ TN *s = vs + r * sizeof(ARMVectorReg); \
+ for (size_t i = 0; i < 2; ++i) { \
+ TW *d = vd + (2 * r + i) * sizeof(ARMVectorReg); \
+ for (size_t e = 0; e < n; ++e) { \
+ d[HW(e)] = s[HN(i * n + e)]; \
+ } \
+ } \
+ } \
+}
+
+UNPK(sme2_sunpk2_bh, 1, int16_t, int8_t, H2, H1)
+UNPK(sme2_sunpk2_hs, 1, int32_t, int16_t, H4, H2)
+UNPK(sme2_sunpk2_sd, 1, int64_t, int32_t, H8, H4)
+
+UNPK(sme2_sunpk4_bh, 2, int16_t, int8_t, H2, H1)
+UNPK(sme2_sunpk4_hs, 2, int32_t, int16_t, H4, H2)
+UNPK(sme2_sunpk4_sd, 2, int64_t, int32_t, H8, H4)
+
+UNPK(sme2_uunpk2_bh, 1, uint16_t, uint8_t, H2, H1)
+UNPK(sme2_uunpk2_hs, 1, uint32_t, uint16_t, H4, H2)
+UNPK(sme2_uunpk2_sd, 1, uint64_t, uint32_t, H8, H4)
+
+UNPK(sme2_uunpk4_bh, 2, uint16_t, uint8_t, H2, H1)
+UNPK(sme2_uunpk4_hs, 2, uint32_t, uint16_t, H4, H2)
+UNPK(sme2_uunpk4_sd, 2, uint64_t, uint32_t, H8, H4)
+
+#undef UNPK
+
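Each UNPK expansion is a widening copy: the low and high halves of a source register become two destination registers, with sign- or zero-extension chosen by the element types. The 1-register signed byte-to-half case in scalar form (illustration only, not part of the patch):

    #include <stddef.h>
    #include <stdint.h>

    static void sunpk2_bh(int16_t *d0, int16_t *d1, const int8_t *s, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            d0[i] = s[i];     /* assignment sign-extends int8 -> int16 */
            d1[i] = s[n + i];
        }
    }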
+/* Deinterleave and convert. */
+void HELPER(sme2_fcvtl)(void *vd, void *vs, float_status *fpst, uint32_t desc)
+{
+ size_t i, n = simd_oprsz(desc) / 4;
+ float16 *s = vs;
+ float32 *d0 = vd;
+ float32 *d1 = vd + sizeof(ARMVectorReg);
+
+ for (i = 0; i < n; ++i) {
+ float32 v0 = sve_f16_to_f32(s[H2(i * 2 + 0)], fpst);
+ float32 v1 = sve_f16_to_f32(s[H2(i * 2 + 1)], fpst);
+ d0[H4(i)] = v0;
+ d1[H4(i)] = v1;
+ }
+}
+
+void HELPER(sme2_scvtf)(void *vd, void *vs, float_status *fpst, uint32_t desc)
+{
+ size_t i, n = simd_oprsz(desc) / 4;
+ float32 *d = vd;
+ int32_t *s = vs;
+
+ for (i = 0; i < n; ++i) {
+ d[i] = int32_to_float32(s[i], fpst);
+ }
+}
+
+void HELPER(sme2_ucvtf)(void *vd, void *vs, float_status *fpst, uint32_t desc)
+{
+ size_t i, n = simd_oprsz(desc) / 4;
+ float32 *d = vd;
+ uint32_t *s = vs;
+
+ for (i = 0; i < n; ++i) {
+ d[i] = uint32_to_float32(s[i], fpst);
+ }
+}
+
+#define ZIP2(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ ARMVectorReg scratch[2]; \
+ size_t oprsz = simd_oprsz(desc); \
+ size_t pairs = oprsz / (sizeof(TYPE) * 2); \
+ TYPE *n = vn, *m = vm; \
+ if (vectors_overlap(vd, 2, vn, 1)) { \
+ n = memcpy(&scratch[0], vn, oprsz); \
+ } \
+ if (vectors_overlap(vd, 2, vm, 1)) { \
+ m = memcpy(&scratch[1], vm, oprsz); \
+ } \
+ for (size_t r = 0; r < 2; ++r) { \
+ TYPE *d = vd + r * sizeof(ARMVectorReg); \
+ size_t base = r * pairs; \
+ for (size_t p = 0; p < pairs; ++p) { \
+ d[H(2 * p + 0)] = n[base + H(p)]; \
+ d[H(2 * p + 1)] = m[base + H(p)]; \
+ } \
+ } \
+}
+
+ZIP2(sme2_zip2_b, uint8_t, H1)
+ZIP2(sme2_zip2_h, uint16_t, H2)
+ZIP2(sme2_zip2_s, uint32_t, H4)
+ZIP2(sme2_zip2_d, uint64_t, )
+ZIP2(sme2_zip2_q, Int128, )
+
+#undef ZIP2
+
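ZIP2 interleaves two registers into a double-width result, element by element; UZP2 below is the inverse permutation. The scalar picture (illustration only, not part of the patch):

    #include <assert.h>

    int main(void)
    {
        int n[4] = {0, 1, 2, 3}, m[4] = {10, 11, 12, 13}, d[8];

        /* zip: d = { n0, m0, n1, m1, n2, m2, n3, m3 } */
        for (int p = 0; p < 4; p++) {
            d[2 * p + 0] = n[p];
            d[2 * p + 1] = m[p];
        }
        assert(d[0] == 0 && d[1] == 10 && d[7] == 13);
        return 0;
    }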
+#define ZIP4(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch[4]; \
+ size_t oprsz = simd_oprsz(desc); \
+ size_t quads = oprsz / (sizeof(TYPE) * 4); \
+ TYPE *s0, *s1, *s2, *s3; \
+ if (vs == vd) { \
+ vs = memcpy(scratch, vs, sizeof(scratch)); \
+ } \
+ s0 = vs; \
+ s1 = vs + sizeof(ARMVectorReg); \
+ s2 = vs + 2 * sizeof(ARMVectorReg); \
+ s3 = vs + 3 * sizeof(ARMVectorReg); \
+ for (size_t r = 0; r < 4; ++r) { \
+ TYPE *d = vd + r * sizeof(ARMVectorReg); \
+ size_t base = r * quads; \
+ for (size_t q = 0; q < quads; ++q) { \
+ d[H(4 * q + 0)] = s0[base + H(q)]; \
+ d[H(4 * q + 1)] = s1[base + H(q)]; \
+ d[H(4 * q + 2)] = s2[base + H(q)]; \
+ d[H(4 * q + 3)] = s3[base + H(q)]; \
+ } \
+ } \
+}
+
+ZIP4(sme2_zip4_b, uint8_t, H1)
+ZIP4(sme2_zip4_h, uint16_t, H2)
+ZIP4(sme2_zip4_s, uint32_t, H4)
+ZIP4(sme2_zip4_d, uint64_t, )
+ZIP4(sme2_zip4_q, Int128, )
+
+#undef ZIP4
+
+#define UZP2(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ ARMVectorReg scratch[2]; \
+ size_t oprsz = simd_oprsz(desc); \
+ size_t pairs = oprsz / (sizeof(TYPE) * 2); \
+ TYPE *d0 = vd, *d1 = vd + sizeof(ARMVectorReg); \
+ if (vectors_overlap(vd, 2, vn, 1)) { \
+ vn = memcpy(&scratch[0], vn, oprsz); \
+ } \
+ if (vectors_overlap(vd, 2, vm, 1)) { \
+ vm = memcpy(&scratch[1], vm, oprsz); \
+ } \
+ for (size_t r = 0; r < 2; ++r) { \
+ TYPE *s = r ? vm : vn; \
+ size_t base = r * pairs; \
+ for (size_t p = 0; p < pairs; ++p) { \
+ d0[base + H(p)] = s[H(2 * p + 0)]; \
+ d1[base + H(p)] = s[H(2 * p + 1)]; \
+ } \
+ } \
+}
+
+UZP2(sme2_uzp2_b, uint8_t, H1)
+UZP2(sme2_uzp2_h, uint16_t, H2)
+UZP2(sme2_uzp2_s, uint32_t, H4)
+UZP2(sme2_uzp2_d, uint64_t, )
+UZP2(sme2_uzp2_q, Int128, )
+
+#undef UZP2
+
+#define UZP4(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ ARMVectorReg scratch[4]; \
+ size_t oprsz = simd_oprsz(desc); \
+ size_t quads = oprsz / (sizeof(TYPE) * 4); \
+ TYPE *d0, *d1, *d2, *d3; \
+ if (vs == vd) { \
+ vs = memcpy(scratch, vs, sizeof(scratch)); \
+ } \
+ d0 = vd; \
+ d1 = vd + sizeof(ARMVectorReg); \
+ d2 = vd + 2 * sizeof(ARMVectorReg); \
+ d3 = vd + 3 * sizeof(ARMVectorReg); \
+ for (size_t r = 0; r < 4; ++r) { \
+ TYPE *s = vs + r * sizeof(ARMVectorReg); \
+ size_t base = r * quads; \
+ for (size_t q = 0; q < quads; ++q) { \
+ d0[base + H(q)] = s[H(4 * q + 0)]; \
+ d1[base + H(q)] = s[H(4 * q + 1)]; \
+ d2[base + H(q)] = s[H(4 * q + 2)]; \
+ d3[base + H(q)] = s[H(4 * q + 3)]; \
+ } \
+ } \
+}
+
+UZP4(sme2_uzp4_b, uint8_t, H1)
+UZP4(sme2_uzp4_h, uint16_t, H2)
+UZP4(sme2_uzp4_s, uint32_t, H4)
+UZP4(sme2_uzp4_d, uint64_t, )
+UZP4(sme2_uzp4_q, Int128, )
+
+#undef UZP4
+
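+/*
+ * Clamp each destination element to [N, M].  For example, with
+ * nn = 10 and mm = 20, a value of 25 becomes 20 and 3 becomes 10.
+ */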
+#define ICLAMP(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+{ \
+ size_t stride = sizeof(ARMVectorReg) / sizeof(TYPE); \
+ size_t elements = simd_oprsz(desc) / sizeof(TYPE); \
+ size_t nreg = simd_data(desc); \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ for (size_t e = 0; e < elements; e++) { \
+ TYPE nn = n[H(e)], mm = m[H(e)]; \
+ for (size_t r = 0; r < nreg; r++) { \
+ TYPE *dd = &d[r * stride + H(e)]; \
+ *dd = MIN(MAX(*dd, nn), mm); \
+ } \
+ } \
+}
+
+ICLAMP(sme2_sclamp_b, int8_t, H1)
+ICLAMP(sme2_sclamp_h, int16_t, H2)
+ICLAMP(sme2_sclamp_s, int32_t, H4)
+ICLAMP(sme2_sclamp_d, int64_t, H8)
+
+ICLAMP(sme2_uclamp_b, uint8_t, H1)
+ICLAMP(sme2_uclamp_h, uint16_t, H2)
+ICLAMP(sme2_uclamp_s, uint32_t, H4)
+ICLAMP(sme2_uclamp_d, uint64_t, H8)
+
+#undef ICLAMP
+
+/*
+ * Note the argument ordering to minnum and maxnum must match
+ * the ARM pseudocode so that NaNs are propagated properly.
+ */
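+/*
+ * For example, when both operands are NaN the Arm NaN-propagation
+ * rules select the result based on operand order, so swapping the
+ * arguments below would be observable.
+ */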
+#define FCLAMP(NAME, TYPE, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, \
+ float_status *fpst, uint32_t desc) \
+{ \
+ size_t stride = sizeof(ARMVectorReg) / sizeof(TYPE); \
+ size_t elements = simd_oprsz(desc) / sizeof(TYPE); \
+ size_t nreg = simd_data(desc); \
+ TYPE *d = vd, *n = vn, *m = vm; \
+ for (size_t e = 0; e < elements; e++) { \
+ TYPE nn = n[H(e)], mm = m[H(e)]; \
+ for (size_t r = 0; r < nreg; r++) { \
+ TYPE *dd = &d[r * stride + H(e)]; \
+ *dd = TYPE##_minnum(TYPE##_maxnum(nn, *dd, fpst), mm, fpst); \
+ } \
+ } \
+}
+
+FCLAMP(sme2_fclamp_h, float16, H2)
+FCLAMP(sme2_fclamp_s, float32, H4)
+FCLAMP(sme2_fclamp_d, float64, H8)
+FCLAMP(sme2_bfclamp, bfloat16, H2)
+
+#undef FCLAMP
+
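+/*
+ * Multi-register SEL with a predicate-as-counter governing predicate:
+ * lanes below the decoded count take one source and the remainder the
+ * other (which of N and M depends on the invert flag), while a
+ * non-zero lg2_stride selects from N only for every
+ * (1 << lg2_stride)-th lane.
+ */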
+void HELPER(sme2_sel_b)(void *vd, void *vn, void *vm,
+ uint32_t png, uint32_t desc)
+{
+ int vl = simd_oprsz(desc);
+ int nreg = simd_data(desc);
+ int elements = vl / sizeof(uint8_t);
+ DecodeCounter p = decode_counter(png, vl, MO_8);
+
+ if (p.lg2_stride == 0) {
+ if (p.invert) {
+ for (int r = 0; r < nreg; r++) {
+ uint8_t *d = vd + r * sizeof(ARMVectorReg);
+ uint8_t *n = vn + r * sizeof(ARMVectorReg);
+ uint8_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+
+ if (split <= 0) {
+ memcpy(d, n, vl); /* all true */
+ } else if (elements <= split) {
+ memcpy(d, m, vl); /* all false */
+ } else {
+ for (int e = 0; e < split; e++) {
+ d[H1(e)] = m[H1(e)];
+ }
+ for (int e = split; e < elements; e++) {
+ d[H1(e)] = n[H1(e)];
+ }
+ }
+ }
+ } else {
+ for (int r = 0; r < nreg; r++) {
+ uint8_t *d = vd + r * sizeof(ARMVectorReg);
+ uint8_t *n = vn + r * sizeof(ARMVectorReg);
+ uint8_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+
+ if (split <= 0) {
+ memcpy(d, m, vl); /* all false */
+ } else if (elements <= split) {
+ memcpy(d, n, vl); /* all true */
+ } else {
+ for (int e = 0; e < split; e++) {
+ d[H1(e)] = n[H1(e)];
+ }
+ for (int e = split; e < elements; e++) {
+ d[H1(e)] = m[H1(e)];
+ }
+ }
+ }
+ }
+ } else {
+ int estride = 1 << p.lg2_stride;
+ if (p.invert) {
+ for (int r = 0; r < nreg; r++) {
+ uint8_t *d = vd + r * sizeof(ARMVectorReg);
+ uint8_t *n = vn + r * sizeof(ARMVectorReg);
+ uint8_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+ int e = 0;
+
+ for (; e < MIN(split, elements); e++) {
+ d[H1(e)] = m[H1(e)];
+ }
+ for (; e < elements; e += estride) {
+ d[H1(e)] = n[H1(e)];
+ for (int i = 1; i < estride; i++) {
+ d[H1(e + i)] = m[H1(e + i)];
+ }
+ }
+ }
+ } else {
+ for (int r = 0; r < nreg; r++) {
+ uint8_t *d = vd + r * sizeof(ARMVectorReg);
+ uint8_t *n = vn + r * sizeof(ARMVectorReg);
+ uint8_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+ int e = 0;
+
+ for (; e < MIN(split, elements); e += estride) {
+ d[H1(e)] = n[H1(e)];
+ for (int i = 1; i < estride; i++) {
+ d[H1(e + i)] = m[H1(e + i)];
+ }
+ }
+ for (; e < elements; e++) {
+ d[H1(e)] = m[H1(e)];
+ }
+ }
+ }
+ }
+}
+
+void HELPER(sme2_sel_h)(void *vd, void *vn, void *vm,
+ uint32_t png, uint32_t desc)
+{
+ int vl = simd_oprsz(desc);
+ int nreg = simd_data(desc);
+ int elements = vl / sizeof(uint16_t);
+ DecodeCounter p = decode_counter(png, vl, MO_16);
+
+ if (p.lg2_stride == 0) {
+ if (p.invert) {
+ for (int r = 0; r < nreg; r++) {
+ uint16_t *d = vd + r * sizeof(ARMVectorReg);
+ uint16_t *n = vn + r * sizeof(ARMVectorReg);
+ uint16_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+
+ if (split <= 0) {
+ memcpy(d, n, vl); /* all true */
+ } else if (elements <= split) {
+ memcpy(d, m, vl); /* all false */
+ } else {
+ for (int e = 0; e < split; e++) {
+ d[H2(e)] = m[H2(e)];
+ }
+ for (int e = split; e < elements; e++) {
+ d[H2(e)] = n[H2(e)];
+ }
+ }
+ }
+ } else {
+ for (int r = 0; r < nreg; r++) {
+ uint16_t *d = vd + r * sizeof(ARMVectorReg);
+ uint16_t *n = vn + r * sizeof(ARMVectorReg);
+ uint16_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+
+ if (split <= 0) {
+ memcpy(d, m, vl); /* all false */
+ } else if (elements <= split) {
+ memcpy(d, n, vl); /* all true */
+ } else {
+ for (int e = 0; e < split; e++) {
+ d[H2(e)] = n[H2(e)];
+ }
+ for (int e = split; e < elements; e++) {
+ d[H2(e)] = m[H2(e)];
+ }
+ }
+ }
+ }
+ } else {
+ int estride = 1 << p.lg2_stride;
+ if (p.invert) {
+ for (int r = 0; r < nreg; r++) {
+ uint16_t *d = vd + r * sizeof(ARMVectorReg);
+ uint16_t *n = vn + r * sizeof(ARMVectorReg);
+ uint16_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+ int e = 0;
+
+ for (; e < MIN(split, elements); e++) {
+ d[H2(e)] = m[H2(e)];
+ }
+ for (; e < elements; e += estride) {
+ d[H2(e)] = n[H2(e)];
+ for (int i = 1; i < estride; i++) {
+ d[H2(e + i)] = m[H2(e + i)];
+ }
+ }
+ }
+ } else {
+ for (int r = 0; r < nreg; r++) {
+ uint16_t *d = vd + r * sizeof(ARMVectorReg);
+ uint16_t *n = vn + r * sizeof(ARMVectorReg);
+ uint16_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+ int e = 0;
+
+ for (; e < MIN(split, elements); e += estride) {
+ d[H2(e)] = n[H2(e)];
+ for (int i = 1; i < estride; i++) {
+ d[H2(e + i)] = m[H2(e + i)];
+ }
+ }
+ for (; e < elements; e++) {
+ d[H2(e)] = m[H2(e)];
+ }
+ }
+ }
+ }
+}
-DEF_IMOPH(smopa, d)
-DEF_IMOPH(umopa, d)
-DEF_IMOPH(sumopa, d)
-DEF_IMOPH(usmopa, d)
+void HELPER(sme2_sel_s)(void *vd, void *vn, void *vm,
+ uint32_t png, uint32_t desc)
+{
+ int vl = simd_oprsz(desc);
+ int nreg = simd_data(desc);
+ int elements = vl / sizeof(uint32_t);
+ DecodeCounter p = decode_counter(png, vl, MO_32);
+
+ if (p.lg2_stride == 0) {
+ if (p.invert) {
+ for (int r = 0; r < nreg; r++) {
+ uint32_t *d = vd + r * sizeof(ARMVectorReg);
+ uint32_t *n = vn + r * sizeof(ARMVectorReg);
+ uint32_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+
+ if (split <= 0) {
+ memcpy(d, n, vl); /* all true */
+ } else if (elements <= split) {
+ memcpy(d, m, vl); /* all false */
+ } else {
+ for (int e = 0; e < split; e++) {
+ d[H4(e)] = m[H4(e)];
+ }
+ for (int e = split; e < elements; e++) {
+ d[H4(e)] = n[H4(e)];
+ }
+ }
+ }
+ } else {
+ for (int r = 0; r < nreg; r++) {
+ uint32_t *d = vd + r * sizeof(ARMVectorReg);
+ uint32_t *n = vn + r * sizeof(ARMVectorReg);
+ uint32_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+
+ if (split <= 0) {
+ memcpy(d, m, vl); /* all false */
+ } else if (elements <= split) {
+ memcpy(d, n, vl); /* all true */
+ } else {
+ for (int e = 0; e < split; e++) {
+ d[H4(e)] = n[H4(e)];
+ }
+ for (int e = split; e < elements; e++) {
+ d[H4(e)] = m[H4(e)];
+ }
+ }
+ }
+ }
+ } else {
+ /* p.esz must be MO_64, so stride must be 2. */
+ if (p.invert) {
+ for (int r = 0; r < nreg; r++) {
+ uint32_t *d = vd + r * sizeof(ARMVectorReg);
+ uint32_t *n = vn + r * sizeof(ARMVectorReg);
+ uint32_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+ int e = 0;
+
+ for (; e < MIN(split, elements); e++) {
+ d[H4(e)] = m[H4(e)];
+ }
+ for (; e < elements; e += 2) {
+ d[H4(e)] = n[H4(e)];
+ d[H4(e + 1)] = m[H4(e + 1)];
+ }
+ }
+ } else {
+ for (int r = 0; r < nreg; r++) {
+ uint32_t *d = vd + r * sizeof(ARMVectorReg);
+ uint32_t *n = vn + r * sizeof(ARMVectorReg);
+ uint32_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+ int e = 0;
+
+ for (; e < MIN(split, elements); e += 2) {
+ d[H4(e)] = n[H4(e)];
+ d[H4(e + 1)] = m[H4(e + 1)];
+ }
+ for (; e < elements; e++) {
+ d[H4(e)] = m[H4(e)];
+ }
+ }
+ }
+ }
+}
+
+void HELPER(sme2_sel_d)(void *vd, void *vn, void *vm,
+ uint32_t png, uint32_t desc)
+{
+ int vl = simd_oprsz(desc);
+ int nreg = simd_data(desc);
+ int elements = vl / sizeof(uint64_t);
+ DecodeCounter p = decode_counter(png, vl, MO_64);
+
+ if (p.invert) {
+ for (int r = 0; r < nreg; r++) {
+ uint64_t *d = vd + r * sizeof(ARMVectorReg);
+ uint64_t *n = vn + r * sizeof(ARMVectorReg);
+ uint64_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+
+ if (split <= 0) {
+ memcpy(d, n, vl); /* all true */
+ } else if (elements <= split) {
+ memcpy(d, m, vl); /* all false */
+ } else {
+ memcpy(d, m, split * sizeof(uint64_t));
+ memcpy(d + split, n + split,
+ (elements - split) * sizeof(uint64_t));
+ }
+ }
+ } else {
+ for (int r = 0; r < nreg; r++) {
+ uint64_t *d = vd + r * sizeof(ARMVectorReg);
+ uint64_t *n = vn + r * sizeof(ARMVectorReg);
+ uint64_t *m = vm + r * sizeof(ARMVectorReg);
+ int split = p.count - r * elements;
+
+ if (split <= 0) {
+ memcpy(d, m, vl); /* all false */
+ } else if (elements <= split) {
+ memcpy(d, n, vl); /* all true */
+ } else {
+ memcpy(d, n, split * sizeof(uint64_t));
+ memcpy(d + split, m + split,
+ (elements - split) * sizeof(uint64_t));
+ }
+ }
+ }
+}
diff --git a/target/arm/tcg/sve.decode b/target/arm/tcg/sve.decode
index 04b6fcc..2efd5f5 100644
--- a/target/arm/tcg/sve.decode
+++ b/target/arm/tcg/sve.decode
@@ -30,6 +30,7 @@
%size_23 23:2
%dtype_23_13 23:2 13:2
%index3_22_19 22:1 19:2
+%index3_22_17 22:1 17:2
%index3_19_11 19:2 11:1
%index2_20_11 20:1 11:1
@@ -57,6 +58,11 @@
# as propagated via the MOVPRFX instruction.
%reg_movprfx 0:5
+%rn_ax2 6:4 !function=times_2
+
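+# Predicate-as-counter registers are limited to p8-p15, hence the
+# 3-bit PNd/PNn fields are biased by 8.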
+%pnd 0:3 !function=plus_8
+%pnn 5:3 !function=plus_8
+
###########################################################################
# Named attribute sets. These are used to make nice(er) names
# when creating helpers common to those for the individual
@@ -102,6 +108,7 @@
# Two operand
@pd_pn ........ esz:2 .. .... ....... rn:4 . rd:4 &rr_esz
@rd_rn ........ esz:2 ...... ...... rn:5 rd:5 &rr_esz
+@rd_rnx2 ........ ... ..... ...... ..... rd:5 &rr_esz rn=%rn_ax2
# Two operand with governing predicate, flags setting
@pd_pg_pn_s ........ . s:1 ...... .. pg:4 . rn:4 . rd:4 &rpr_s
@@ -131,11 +138,11 @@
@rda_rn_rm ........ esz:2 . rm:5 ... ... rn:5 rd:5 \
&rrrr_esz ra=%reg_movprfx
-# Four operand with unused vector element size
-@rda_rn_rm_e0 ........ ... rm:5 ... ... rn:5 rd:5 \
- &rrrr_esz esz=0 ra=%reg_movprfx
-@rdn_ra_rm_e0 ........ ... rm:5 ... ... ra:5 rd:5 \
- &rrrr_esz esz=0 rn=%reg_movprfx
+# Four operand with explicit vector element size
+@rda_rn_rm_ex ........ ... rm:5 ... ... rn:5 rd:5 \
+ &rrrr_esz ra=%reg_movprfx
+@rdn_ra_rm_ex ........ ... rm:5 ... ... ra:5 rd:5 \
+ &rrrr_esz rn=%reg_movprfx
# Three operand with "memory" size, aka immediate left shift
@rd_rn_msz_rm ........ ... rm:5 .... imm:2 rn:5 rd:5 &rrri
@@ -222,6 +229,9 @@
@rprr_load_dt ....... dtype:4 rm:5 ... pg:3 rn:5 rd:5 &rprr_load
@rpri_load_dt ....... dtype:4 . imm:s4 ... pg:3 rn:5 rd:5 &rpri_load
+@rprr_load ....... .... rm:5 ... pg:3 rn:5 rd:5 &rprr_load
+@rpri_load ....... .... . imm:s4 ... pg:3 rn:5 rd:5 &rpri_load
+
@rprr_load_msz ....... .... rm:5 ... pg:3 rn:5 rd:5 \
&rprr_load dtype=%msz_dtype
@rpri_load_msz ....... .... . imm:s4 ... pg:3 rn:5 rd:5 \
@@ -245,7 +255,7 @@
# Stores; user must fill in ESZ, MSZ, NREG as needed.
@rprr_store ....... .. .. rm:5 ... pg:3 rn:5 rd:5 &rprr_store
-@rpri_store_msz ....... msz:2 .. . imm:s4 ... pg:3 rn:5 rd:5 &rpri_store
+@rpri_store ....... .. .. . imm:s4 ... pg:3 rn:5 rd:5 &rpri_store
@rprr_store_esz_n0 ....... .. esz:2 rm:5 ... pg:3 rn:5 rd:5 \
&rprr_store nreg=0
@rprr_scatter_store ....... msz:2 .. rm:5 ... pg:3 rn:5 rd:5 \
@@ -320,6 +330,11 @@ ORV 00000100 .. 011 000 001 ... ..... ..... @rd_pg_rn
EORV 00000100 .. 011 001 001 ... ..... ..... @rd_pg_rn
ANDV 00000100 .. 011 010 001 ... ..... ..... @rd_pg_rn
+# SVE2.1 bitwise logical reduction (quadwords)
+ORQV 00000100 .. 011 100 001 ... ..... ..... @rd_pg_rn
+EORQV 00000100 .. 011 101 001 ... ..... ..... @rd_pg_rn
+ANDQV 00000100 .. 011 110 001 ... ..... ..... @rd_pg_rn
+
# SVE constructive prefix (predicated)
MOVPRFX_z 00000100 .. 010 000 001 ... ..... ..... @rd_pg_rn
MOVPRFX_m 00000100 .. 010 001 001 ... ..... ..... @rd_pg_rn
@@ -335,6 +350,13 @@ UMAXV 00000100 .. 001 001 001 ... ..... ..... @rd_pg_rn
SMINV 00000100 .. 001 010 001 ... ..... ..... @rd_pg_rn
UMINV 00000100 .. 001 011 001 ... ..... ..... @rd_pg_rn
+# SVE2.1 segment reduction
+ADDQV 00000100 .. 000 101 001 ... ..... ..... @rd_pg_rn
+SMAXQV 00000100 .. 001 100 001 ... ..... ..... @rd_pg_rn
+SMINQV 00000100 .. 001 110 001 ... ..... ..... @rd_pg_rn
+UMAXQV 00000100 .. 001 101 001 ... ..... ..... @rd_pg_rn
+UMINQV 00000100 .. 001 111 001 ... ..... ..... @rd_pg_rn
+
### SVE Shift by Immediate - Predicated Group
# SVE bitwise shift by immediate (predicated)
@@ -428,12 +450,12 @@ XAR 00000100 .. 1 ..... 001 101 rm:5 rd:5 &rrri_esz \
rn=%reg_movprfx esz=%tszimm16_esz imm=%tszimm16_shr
# SVE2 bitwise ternary operations
-EOR3 00000100 00 1 ..... 001 110 ..... ..... @rdn_ra_rm_e0
-BSL 00000100 00 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
-BCAX 00000100 01 1 ..... 001 110 ..... ..... @rdn_ra_rm_e0
-BSL1N 00000100 01 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
-BSL2N 00000100 10 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
-NBSL 00000100 11 1 ..... 001 111 ..... ..... @rdn_ra_rm_e0
+EOR3 00000100 00 1 ..... 001 110 ..... ..... @rdn_ra_rm_ex esz=0
+BSL 00000100 00 1 ..... 001 111 ..... ..... @rdn_ra_rm_ex esz=0
+BCAX 00000100 01 1 ..... 001 110 ..... ..... @rdn_ra_rm_ex esz=0
+BSL1N 00000100 01 1 ..... 001 111 ..... ..... @rdn_ra_rm_ex esz=0
+BSL2N 00000100 10 1 ..... 001 111 ..... ..... @rdn_ra_rm_ex esz=0
+NBSL 00000100 11 1 ..... 001 111 ..... ..... @rdn_ra_rm_ex esz=0
### SVE Index Generation Group
@@ -559,6 +581,14 @@ DUP_s 00000101 .. 1 00000 001110 ..... ..... @rd_rn
DUP_x 00000101 .. 1 ..... 001000 rn:5 rd:5 \
&rri imm=%imm7_22_16
+# SVE Permute Vector - one source quadwords
+DUPQ 00000101 001 imm:4 1 001001 rn:5 rd:5 &rri_esz esz=0
+DUPQ 00000101 001 imm:3 10 001001 rn:5 rd:5 &rri_esz esz=1
+DUPQ 00000101 001 imm:2 100 001001 rn:5 rd:5 &rri_esz esz=2
+DUPQ 00000101 001 imm:1 1000 001001 rn:5 rd:5 &rri_esz esz=3
+
+EXTQ 00000101 0110 imm:4 001001 rn:5 rd:5 &rri
+
# SVE insert SIMD&FP scalar register
INSR_f 00000101 .. 1 10100 001110 ..... ..... @rdn_rm
@@ -568,6 +598,22 @@ INSR_r 00000101 .. 1 00100 001110 ..... ..... @rdn_rm
# SVE reverse vector elements
REV_v 00000101 .. 1 11000 001110 ..... ..... @rd_rn
+# SVE move predicate to/from vector
+
+PMOV_pv 00000101 00 101 01 0001110 rn:5 0 rd:4 \
+ &rri_esz esz=0 imm=0
+PMOV_pv 00000101 00 101 1 imm:1 0001110 rn:5 0 rd:4 &rri_esz esz=1
+PMOV_pv 00000101 01 101 imm:2 0001110 rn:5 0 rd:4 &rri_esz esz=2
+PMOV_pv 00000101 1. 101 .. 0001110 rn:5 0 rd:4 \
+ &rri_esz esz=3 imm=%index3_22_17
+
+PMOV_vp 00000101 00 101 01 1001110 0 rn:4 rd:5 \
+ &rri_esz esz=0 imm=0
+PMOV_vp 00000101 00 101 1 imm:1 1001110 0 rn:4 rd:5 &rri_esz esz=1
+PMOV_vp 00000101 01 101 imm:2 1001110 0 rn:4 rd:5 &rri_esz esz=2
+PMOV_vp 00000101 1. 101 .. 1001110 0 rn:4 rd:5 \
+ &rri_esz esz=3 imm=%index3_22_17
+
# SVE vector table lookup
TBL 00000101 .. 1 ..... 001100 ..... ..... @rd_rn_rm
@@ -614,6 +660,15 @@ UZP2_q 00000101 10 1 ..... 000 011 ..... ..... @rd_rn_rm_e0
TRN1_q 00000101 10 1 ..... 000 110 ..... ..... @rd_rn_rm_e0
TRN2_q 00000101 10 1 ..... 000 111 ..... ..... @rd_rn_rm_e0
+# SVE2.1 permute vector elements (quadwords)
+ZIPQ1 01000100 .. 0 ..... 111 000 ..... ..... @rd_rn_rm
+ZIPQ2 01000100 .. 0 ..... 111 001 ..... ..... @rd_rn_rm
+UZPQ1 01000100 .. 0 ..... 111 010 ..... ..... @rd_rn_rm
+UZPQ2 01000100 .. 0 ..... 111 011 ..... ..... @rd_rn_rm
+
+TBLQ 01000100 .. 0 ..... 111 110 ..... ..... @rd_rn_rm
+TBXQ 00000101 .. 1 ..... 001 101 ..... ..... @rd_rn_rm
+
### SVE Permute - Predicated Group
# SVE compress active elements
@@ -725,6 +780,7 @@ PTEST 00100101 01 010000 11 pg:4 0 rn:4 0 0000
# SVE predicate initialize
PTRUE 00100101 esz:2 01100 s:1 111000 pat:5 0 rd:4
+PTRUE_cnt 00100101 esz:2 1000000111100000010 ... rd=%pnd
# SVE initialize FFR
SETFFR 00100101 0010 1100 1001 0000 0000 0000
@@ -765,7 +821,8 @@ BRKN 00100101 0. 01100001 .... 0 .... 0 .... @pd_pg_pn_s
### SVE Predicate Count Group
# SVE predicate count
-CNTP 00100101 .. 100 000 10 .... 0 .... ..... @rd_pg4_pn
+CNTP 00100101 .. 100 000 10 .... 0 .... ..... @rd_pg4_pn
+CNTP_c 00100101 esz:2 100 000 10 000 vl:1 1 rn:4 rd:5
# SVE inc/dec register by predicate count
INCDECP_r 00100101 .. 10110 d:1 10001 00 .... ..... @incdec_pred u=1
@@ -786,11 +843,35 @@ SINCDECP_z 00100101 .. 1010 d:1 u:1 10000 00 .... ..... @incdec2_pred
CTERM 00100101 1 sf:1 1 rm:5 001000 rn:5 ne:1 0000
# SVE integer compare scalar count and limit
-WHILE 00100101 esz:2 1 rm:5 000 sf:1 u:1 lt:1 rn:5 eq:1 rd:4
+&while esz rd rn rm sf u eq
+WHILE_lt 00100101 esz:2 1 rm:5 000 sf:1 u:1 1 rn:5 eq:1 rd:4 &while
+WHILE_gt 00100101 esz:2 1 rm:5 000 sf:1 u:1 0 rn:5 eq:1 rd:4 &while
# SVE2 pointer conflict compare
WHILE_ptr 00100101 esz:2 1 rm:5 001 100 rn:5 rw:1 rd:4
+# SVE2.1 predicate pair
+%pd_pair 1:3 !function=times_2
+@while_pair ........ esz:2 . rm:5 .... u:1 . rn:5 . ... eq:1 \
+ &while rd=%pd_pair sf=1
+
+WHILE_lt_pair 00100101 .. 1 ..... 0101 . 1 ..... 1 ... . @while_pair
+WHILE_gt_pair 00100101 .. 1 ..... 0101 . 0 ..... 1 ... . @while_pair
+
+# SVE2.1 predicate as count
+@while_cnt ........ esz:2 . rm:5 .... u:1 . rn:5 . eq:1 ... \
+ &while rd=%pnd sf=1
+
+WHILE_lt_cnt2 00100101 .. 1 ..... 0100 . 1 ..... 1 . ... @while_cnt
+WHILE_lt_cnt4 00100101 .. 1 ..... 0110 . 1 ..... 1 . ... @while_cnt
+WHILE_gt_cnt2 00100101 .. 1 ..... 0100 . 0 ..... 1 . ... @while_cnt
+WHILE_gt_cnt4 00100101 .. 1 ..... 0110 . 0 ..... 1 . ... @while_cnt
+
+# SVE2.1 extract mask predicate from predicate-as-counter
+&pext rd rn esz imm
+PEXT_1 00100101 esz:2 1 00000 0111 00 imm:2 ... 1 rd:4 &pext rn=%pnn
+PEXT_2 00100101 esz:2 1 00000 0111 010 imm:1 ... 1 rd:4 &pext rn=%pnn
+
### SVE Integer Wide Immediate - Unpredicated Group
# SVE broadcast floating-point immediate (unpredicated)
@@ -851,10 +932,13 @@ CDOT_zzzz 01000100 esz:2 0 rm:5 0001 rot:2 rn:5 rd:5 ra=%reg_movprfx
#### SVE Multiply - Indexed
# SVE integer dot product (indexed)
-SDOT_zzxw_s 01000100 10 1 ..... 000000 ..... ..... @rrxr_2 esz=2
-SDOT_zzxw_d 01000100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3
-UDOT_zzxw_s 01000100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2
-UDOT_zzxw_d 01000100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3
+SDOT_zzxw_4s 01000100 10 1 ..... 000000 ..... ..... @rrxr_2 esz=2
+SDOT_zzxw_4d 01000100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3
+UDOT_zzxw_4s 01000100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2
+UDOT_zzxw_4d 01000100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3
+
+SDOT_zzxw_2s 01000100 10 0 ..... 110010 ..... ..... @rrxr_2 esz=2
+UDOT_zzxw_2s 01000100 10 0 ..... 110011 ..... ..... @rrxr_2 esz=2
# SVE2 integer multiply-add (indexed)
MLA_zzxz_h 01000100 0. 1 ..... 000010 ..... ..... @rrxr_3 esz=1
@@ -873,8 +957,8 @@ SQRDMLSH_zzxz_s 01000100 10 1 ..... 000101 ..... ..... @rrxr_2 esz=2
SQRDMLSH_zzxz_d 01000100 11 1 ..... 000101 ..... ..... @rrxr_1 esz=3
# SVE mixed sign dot product (indexed)
-USDOT_zzxw_s 01000100 10 1 ..... 000110 ..... ..... @rrxr_2 esz=2
-SUDOT_zzxw_s 01000100 10 1 ..... 000111 ..... ..... @rrxr_2 esz=2
+USDOT_zzxw_4s 01000100 10 1 ..... 000110 ..... ..... @rrxr_2 esz=2
+SUDOT_zzxw_4s 01000100 10 1 ..... 000111 ..... ..... @rrxr_2 esz=2
# SVE2 saturating multiply-add (indexed)
SQDMLALB_zzxw_s 01000100 10 1 ..... 0010.0 ..... ..... @rrxr_3a esz=2
@@ -990,6 +1074,14 @@ FMINNMV 01100101 .. 000 101 001 ... ..... ..... @rd_pg_rn
FMAXV 01100101 .. 000 110 001 ... ..... ..... @rd_pg_rn
FMINV 01100101 .. 000 111 001 ... ..... ..... @rd_pg_rn
+### SVE FP recursive reduction (quadwords)
+
+FADDQV 01100100 .. 010 000 101 ... ..... ..... @rd_pg_rn
+FMAXNMQV 01100100 .. 010 100 101 ... ..... ..... @rd_pg_rn
+FMINNMQV 01100100 .. 010 101 101 ... ..... ..... @rd_pg_rn
+FMAXQV 01100100 .. 010 110 101 ... ..... ..... @rd_pg_rn
+FMINQV 01100100 .. 010 111 101 ... ..... ..... @rd_pg_rn
+
## SVE Floating Point Unary Operations - Unpredicated Group
FRECPE 01100101 .. 001 110 001100 ..... ..... @rd_rn
@@ -1151,12 +1243,24 @@ LD1_zpiz 1000010 .. 01 ..... 1.. ... ..... ..... \
# SVE contiguous load (scalar plus scalar)
LD_zprr 1010010 .... ..... 010 ... ..... ..... @rprr_load_dt nreg=0
+# LD1W (128-bit element)
+LD_zprr 1010010 1000 rm:5 100 pg:3 rn:5 rd:5 \
+ &rprr_load dtype=16 nreg=0
+# LD1D (128-bit element)
+LD_zprr 1010010 1100 rm:5 100 pg:3 rn:5 rd:5 \
+ &rprr_load dtype=17 nreg=0
# SVE contiguous first-fault load (scalar plus scalar)
LDFF1_zprr 1010010 .... ..... 011 ... ..... ..... @rprr_load_dt nreg=0
# SVE contiguous load (scalar plus immediate)
LD_zpri 1010010 .... 0.... 101 ... ..... ..... @rpri_load_dt nreg=0
+# LD1W (128-bit element)
+LD_zpri 1010010 1000 1 imm:s4 001 pg:3 rn:5 rd:5 \
+ &rpri_load dtype=16 nreg=0
+# LD1D (128-bit element)
+LD_zpri 1010010 1100 1 imm:s4 001 pg:3 rn:5 rd:5 \
+ &rpri_load dtype=17 nreg=0
# SVE contiguous non-fault load (scalar plus immediate)
LDNF1_zpri 1010010 .... 1.... 101 ... ..... ..... @rpri_load_dt nreg=0
@@ -1166,12 +1270,26 @@ LDNF1_zpri 1010010 .... 1.... 101 ... ..... ..... @rpri_load_dt nreg=0
# SVE load multiple structures (scalar plus scalar)
# LD2B, LD2H, LD2W, LD2D; etc.
LD_zprr 1010010 .. nreg:2 ..... 110 ... ..... ..... @rprr_load_msz
+# LD[234]Q
+LD_zprr 1010010 01 01 ..... 100 ... ..... ..... \
+ @rprr_load dtype=18 nreg=1
+LD_zprr 1010010 10 01 ..... 100 ... ..... ..... \
+ @rprr_load dtype=18 nreg=2
+LD_zprr 1010010 11 01 ..... 100 ... ..... ..... \
+ @rprr_load dtype=18 nreg=3
# SVE contiguous non-temporal load (scalar plus immediate)
# LDNT1B, LDNT1H, LDNT1W, LDNT1D
# SVE load multiple structures (scalar plus immediate)
# LD2B, LD2H, LD2W, LD2D; etc.
LD_zpri 1010010 .. nreg:2 0.... 111 ... ..... ..... @rpri_load_msz
+# LD[234]Q
+LD_zpri 1010010 01 001 .... 111 ... ..... ..... \
+ @rpri_load dtype=18 nreg=1
+LD_zpri 1010010 10 001 .... 111 ... ..... ..... \
+ @rpri_load dtype=18 nreg=2
+LD_zpri 1010010 11 001 .... 111 ... ..... ..... \
+ @rpri_load dtype=18 nreg=3
# SVE load and broadcast quadword (scalar plus scalar)
LD1RQ_zprr 1010010 .. 00 ..... 000 ... ..... ..... \
@@ -1222,6 +1340,10 @@ LD1_zprz 1100010 10 1. ..... 1.. ... ..... ..... \
LD1_zprz 1100010 11 1. ..... 11. ... ..... ..... \
@rprr_g_load_sc esz=3 msz=3 u=1
+# LD1Q
+LD1_zprz 1100 0100 000 rm:5 101 pg:3 rn:5 rd:5 \
+ &rprr_gather_load u=0 ff=0 xs=2 esz=4 msz=4 scale=0
+
# SVE 64-bit gather load (vector plus immediate)
LD1_zpiz 1100010 .. 01 ..... 1.. ... ..... ..... \
@rpri_g_load esz=3
@@ -1245,8 +1367,20 @@ STR_zri 1110010 11 0. ..... 010 ... ..... ..... @rd_rn_i9
# SVE contiguous store (scalar plus immediate)
# ST1B, ST1H, ST1W, ST1D; require msz <= esz
-ST_zpri 1110010 .. esz:2 0.... 111 ... ..... ..... \
- @rpri_store_msz nreg=0
+ST_zpri 1110010 00 esz:2 0.... 111 ... ..... ..... \
+ @rpri_store msz=0 nreg=0
+ST_zpri 1110010 01 esz:2 0.... 111 ... ..... ..... \
+ @rpri_store msz=1 nreg=0
+ST_zpri 1110010 10 10 0.... 111 ... ..... ..... \
+ @rpri_store msz=2 esz=2 nreg=0
+ST_zpri 1110010 10 11 0.... 111 ... ..... ..... \
+ @rpri_store msz=2 esz=3 nreg=0
+ST_zpri 1110010 11 11 0.... 111 ... ..... ..... \
+ @rpri_store msz=3 esz=3 nreg=0
+ST_zpri 1110010 10 00 0.... 111 ... ..... ..... \
+ @rpri_store msz=2 esz=4 nreg=0
+ST_zpri 1110010 11 10 0.... 111 ... ..... ..... \
+ @rpri_store msz=3 esz=4 nreg=0
# SVE contiguous store (scalar plus scalar)
# ST1B, ST1H, ST1W, ST1D; require msz <= esz
@@ -1255,20 +1389,40 @@ ST_zprr 1110010 00 .. ..... 010 ... ..... ..... \
@rprr_store_esz_n0 msz=0
ST_zprr 1110010 01 .. ..... 010 ... ..... ..... \
@rprr_store_esz_n0 msz=1
-ST_zprr 1110010 10 .. ..... 010 ... ..... ..... \
- @rprr_store_esz_n0 msz=2
+ST_zprr 1110010 10 10 ..... 010 ... ..... ..... \
+ @rprr_store msz=2 esz=2 nreg=0
+ST_zprr 1110010 10 11 ..... 010 ... ..... ..... \
+ @rprr_store msz=2 esz=3 nreg=0
ST_zprr 1110010 11 11 ..... 010 ... ..... ..... \
@rprr_store msz=3 esz=3 nreg=0
+ST_zprr 1110010 10 00 ..... 010 ... ..... ..... \
+ @rprr_store msz=2 esz=4 nreg=0
+ST_zprr 1110010 11 10 ..... 010 ... ..... ..... \
+ @rprr_store msz=3 esz=4 nreg=0
# SVE contiguous non-temporal store (scalar plus immediate) (nreg == 0)
# SVE store multiple structures (scalar plus immediate) (nreg != 0)
ST_zpri 1110010 .. nreg:2 1.... 111 ... ..... ..... \
- @rpri_store_msz esz=%size_23
+ @rpri_store msz=%size_23 esz=%size_23
+# ST[234]Q
+ST_zpri 11100100 01 00 .... 000 ... ..... ..... \
+ @rpri_store msz=4 esz=4 nreg=1
+ST_zpri 11100100 10 00 .... 000 ... ..... ..... \
+ @rpri_store msz=4 esz=4 nreg=2
+ST_zpri 11100100 11 00 .... 000 ... ..... ..... \
+ @rpri_store msz=4 esz=4 nreg=3
# SVE contiguous non-temporal store (scalar plus scalar) (nreg == 0)
# SVE store multiple structures (scalar plus scalar) (nreg != 0)
-ST_zprr 1110010 msz:2 nreg:2 ..... 011 ... ..... ..... \
- @rprr_store esz=%size_23
+ST_zprr 1110010 .. nreg:2 ..... 011 ... ..... ..... \
+ @rprr_store msz=%size_23 esz=%size_23
+# ST[234]Q
+ST_zprr 11100100 01 1 ..... 000 ... ..... ..... \
+ @rprr_store msz=4 esz=4 nreg=1
+ST_zprr 11100100 10 1 ..... 000 ... ..... ..... \
+ @rprr_store msz=4 esz=4 nreg=2
+ST_zprr 11100100 11 1 ..... 000 ... ..... ..... \
+ @rprr_store msz=4 esz=4 nreg=3
# SVE 32-bit scatter store (scalar plus 32-bit scaled offsets)
# Require msz > 0 && msz <= esz.
@@ -1293,6 +1447,10 @@ ST1_zprz 1110010 .. 01 ..... 101 ... ..... ..... \
ST1_zprz 1110010 .. 00 ..... 101 ... ..... ..... \
@rprr_scatter_store xs=2 esz=3 scale=0
+# ST1Q
+ST1_zprz 1110 0100 001 rm:5 001 pg:3 rn:5 rd:5 \
+ &rprr_scatter_store xs=2 msz=4 esz=4 scale=0
+
# SVE 64-bit scatter store (vector plus immediate)
ST1_zpiz 1110010 .. 10 ..... 101 ... ..... ..... \
@rpri_scatter_store esz=3
@@ -1450,9 +1608,9 @@ EORTB 01000101 .. 0 ..... 10010 1 ..... ..... @rd_rn_rm
## SVE integer matrix multiply accumulate
-SMMLA 01000101 00 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0
-USMMLA 01000101 10 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0
-UMMLA 01000101 11 0 ..... 10011 0 ..... ..... @rda_rn_rm_e0
+SMMLA 01000101 00 0 ..... 10011 0 ..... ..... @rda_rn_rm_ex esz=2
+USMMLA 01000101 10 0 ..... 10011 0 ..... ..... @rda_rn_rm_ex esz=2
+UMMLA 01000101 11 0 ..... 10011 0 ..... ..... @rda_rn_rm_ex esz=2
## SVE2 bitwise permute
@@ -1504,13 +1662,22 @@ UABA 01000101 .. 0 ..... 11111 1 ..... ..... @rd_rn_rm
#### SVE2 Narrowing
## SVE2 saturating extract narrow
-
# Bits 23, 18-16 are zero, limited in the translator via esz < 3 & imm == 0.
-SQXTNB 01000101 .. 1 ..... 010 000 ..... ..... @rd_rn_tszimm_shl
+
+{
+ SQCVTN_sh 01000101 00 1 10001 010 000 ....0 ..... @rd_rnx2 esz=1
+ SQXTNB 01000101 .. 1 ..... 010 000 ..... ..... @rd_rn_tszimm_shl
+}
SQXTNT 01000101 .. 1 ..... 010 001 ..... ..... @rd_rn_tszimm_shl
-UQXTNB 01000101 .. 1 ..... 010 010 ..... ..... @rd_rn_tszimm_shl
+{
+ UQCVTN_sh 01000101 00 1 10001 010 010 ....0 ..... @rd_rnx2 esz=1
+ UQXTNB 01000101 .. 1 ..... 010 010 ..... ..... @rd_rn_tszimm_shl
+}
UQXTNT 01000101 .. 1 ..... 010 011 ..... ..... @rd_rn_tszimm_shl
-SQXTUNB 01000101 .. 1 ..... 010 100 ..... ..... @rd_rn_tszimm_shl
+{
+ SQCVTUN_sh 01000101 00 1 10001 010 100 ....0 ..... @rd_rnx2 esz=1
+ SQXTUNB 01000101 .. 1 ..... 010 100 ..... ..... @rd_rn_tszimm_shl
+}
SQXTUNT 01000101 .. 1 ..... 010 101 ..... ..... @rd_rn_tszimm_shl
## SVE2 bitwise shift right narrow
@@ -1597,14 +1764,17 @@ UMLSLT_zzzw 01000100 .. 0 ..... 010 111 ..... ..... @rda_rn_rm
CMLA_zzzz 01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5 ra=%reg_movprfx
SQRDCMLAH_zzzz 01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5 ra=%reg_movprfx
-## SVE mixed sign dot product
+## SVE dot product
+
+SDOT_zzzz_2s 01000100 00 0 ..... 110 010 ..... ..... @rda_rn_rm_ex esz=2
+UDOT_zzzz_2s 01000100 00 0 ..... 110 011 ..... ..... @rda_rn_rm_ex esz=2
-USDOT_zzzz 01000100 .. 0 ..... 011 110 ..... ..... @rda_rn_rm
+USDOT_zzzz_4s 01000100 10 0 ..... 011 110 ..... ..... @rda_rn_rm_ex esz=2
### SVE2 floating point matrix multiply accumulate
-BFMMLA 01100100 01 1 ..... 111 001 ..... ..... @rda_rn_rm_e0
-FMMLA_s 01100100 10 1 ..... 111 001 ..... ..... @rda_rn_rm_e0
-FMMLA_d 01100100 11 1 ..... 111 001 ..... ..... @rda_rn_rm_e0
+BFMMLA 01100100 01 1 ..... 111 001 ..... ..... @rda_rn_rm_ex esz=1
+FMMLA_s 01100100 10 1 ..... 111 001 ..... ..... @rda_rn_rm_ex esz=2
+FMMLA_d 01100100 11 1 ..... 111 001 ..... ..... @rda_rn_rm_ex esz=3
### SVE2 Memory Gather Load Group
@@ -1654,26 +1824,35 @@ FCVTLT_sd 01100100 11 0010 11 101 ... ..... ..... @rd_pg_rn_e0
FLOGB 01100101 00 011 esz:2 0101 pg:3 rn:5 rd:5 &rpr_esz
### SVE2 floating-point multiply-add long (vectors)
-FMLALB_zzzw 01100100 10 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0
-FMLALT_zzzw 01100100 10 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_e0
-FMLSLB_zzzw 01100100 10 1 ..... 10 1 00 0 ..... ..... @rda_rn_rm_e0
-FMLSLT_zzzw 01100100 10 1 ..... 10 1 00 1 ..... ..... @rda_rn_rm_e0
+FMLALB_zzzw 01100100 10 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_ex esz=2
+FMLALT_zzzw 01100100 10 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_ex esz=2
+FMLSLB_zzzw 01100100 10 1 ..... 10 1 00 0 ..... ..... @rda_rn_rm_ex esz=2
+FMLSLT_zzzw 01100100 10 1 ..... 10 1 00 1 ..... ..... @rda_rn_rm_ex esz=2
-BFMLALB_zzzw 01100100 11 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0
-BFMLALT_zzzw 01100100 11 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_e0
+BFMLALB_zzzw 01100100 11 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_ex esz=2
+BFMLALT_zzzw 01100100 11 1 ..... 10 0 00 1 ..... ..... @rda_rn_rm_ex esz=2
+BFMLSLB_zzzw 01100100 11 1 ..... 10 1 00 0 ..... ..... @rda_rn_rm_ex esz=2
+BFMLSLT_zzzw 01100100 11 1 ..... 10 1 00 1 ..... ..... @rda_rn_rm_ex esz=2
-### SVE2 floating-point bfloat16 dot-product
-BFDOT_zzzz 01100100 01 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_e0
+### SVE2 floating-point dot-product
+FDOT_zzzz 01100100 00 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_ex esz=2
+BFDOT_zzzz 01100100 01 1 ..... 10 0 00 0 ..... ..... @rda_rn_rm_ex esz=2
### SVE2 floating-point multiply-add long (indexed)
+
FMLALB_zzxw 01100100 10 1 ..... 0100.0 ..... ..... @rrxr_3a esz=2
FMLALT_zzxw 01100100 10 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2
FMLSLB_zzxw 01100100 10 1 ..... 0110.0 ..... ..... @rrxr_3a esz=2
FMLSLT_zzxw 01100100 10 1 ..... 0110.1 ..... ..... @rrxr_3a esz=2
+
BFMLALB_zzxw 01100100 11 1 ..... 0100.0 ..... ..... @rrxr_3a esz=2
BFMLALT_zzxw 01100100 11 1 ..... 0100.1 ..... ..... @rrxr_3a esz=2
+BFMLSLB_zzxw 01100100 11 1 ..... 0110.0 ..... ..... @rrxr_3a esz=2
+BFMLSLT_zzxw 01100100 11 1 ..... 0110.1 ..... ..... @rrxr_3a esz=2
-### SVE2 floating-point bfloat16 dot-product (indexed)
+### SVE2 floating-point dot-product (indexed)
+
+FDOT_zzxz 01100100 00 1 ..... 010000 ..... ..... @rrxr_2 esz=2
BFDOT_zzxz 01100100 01 1 ..... 010000 ..... ..... @rrxr_2 esz=2
### SVE broadcast predicate element
@@ -1700,3 +1879,55 @@ PSEL 00100101 .1 1 000 .. 01 .... 0 .... 0 .... \
SCLAMP 01000100 .. 0 ..... 110000 ..... ..... @rda_rn_rm
UCLAMP 01000100 .. 0 ..... 110001 ..... ..... @rda_rn_rm
+
+FCLAMP 01100100 .. 1 ..... 001001 ..... ..... @rda_rn_rm
+
+### SVE2p1 multi-vec contiguous load
+
+&zcrr_ldst rd png rn rm esz nreg
+&zcri_ldst rd png rn imm esz nreg
+%png 10:3 !function=plus_8
+%zd_ax2 1:4 !function=times_2
+%zd_ax4 2:3 !function=times_4
+
+LD1_zcrr 10100000000 rm:5 0 esz:2 ... rn:5 .... - \
+ &zcrr_ldst %png rd=%zd_ax2 nreg=2
+LD1_zcrr 10100000000 rm:5 1 esz:2 ... rn:5 ... 0- \
+ &zcrr_ldst %png rd=%zd_ax4 nreg=4
+
+ST1_zcrr 10100000001 rm:5 0 esz:2 ... rn:5 .... - \
+ &zcrr_ldst %png rd=%zd_ax2 nreg=2
+ST1_zcrr 10100000001 rm:5 1 esz:2 ... rn:5 ... 0- \
+ &zcrr_ldst %png rd=%zd_ax4 nreg=4
+
+LD1_zcri 101000000100 imm:s4 0 esz:2 ... rn:5 .... - \
+ &zcri_ldst %png rd=%zd_ax2 nreg=2
+LD1_zcri 101000000100 imm:s4 1 esz:2 ... rn:5 ... 0- \
+ &zcri_ldst %png rd=%zd_ax4 nreg=4
+
+ST1_zcri 101000000110 imm:s4 0 esz:2 ... rn:5 .... - \
+ &zcri_ldst %png rd=%zd_ax2 nreg=2
+ST1_zcri 101000000110 imm:s4 1 esz:2 ... rn:5 ... 0- \
+ &zcri_ldst %png rd=%zd_ax4 nreg=4
+
+# Note: N bit and 0 bit (for nreg4) still mashed in rd.
+# This is handled within gen_ldst_c().
+LD1_zcrr_stride 10100001000 rm:5 0 esz:2 ... rn:5 rd:5 \
+ &zcrr_ldst %png nreg=2
+LD1_zcrr_stride 10100001000 rm:5 1 esz:2 ... rn:5 rd:5 \
+ &zcrr_ldst %png nreg=4
+
+ST1_zcrr_stride 10100001001 rm:5 0 esz:2 ... rn:5 rd:5 \
+ &zcrr_ldst %png nreg=2
+ST1_zcrr_stride 10100001001 rm:5 1 esz:2 ... rn:5 rd:5 \
+ &zcrr_ldst %png nreg=4
+
+LD1_zcri_stride 101000010100 imm:s4 0 esz:2 ... rn:5 rd:5 \
+ &zcri_ldst %png nreg=2
+LD1_zcri_stride 101000010100 imm:s4 1 esz:2 ... rn:5 rd:5 \
+ &zcri_ldst %png nreg=4
+
+ST1_zcri_stride 101000010110 imm:s4 0 esz:2 ... rn:5 rd:5 \
+ &zcri_ldst %png nreg=2
+ST1_zcri_stride 101000010110 imm:s4 1 esz:2 ... rn:5 rd:5 \
+ &zcri_ldst %png nreg=4
diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c
index d786b4b..43b872c 100644
--- a/target/arm/tcg/sve_helper.c
+++ b/target/arm/tcg/sve_helper.c
@@ -20,15 +20,19 @@
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
+#include "exec/target_page.h"
+#include "exec/tlb-flags.h"
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"
#include "tcg/tcg.h"
#include "vec_internal.h"
#include "sve_ldst_internal.h"
+#include "accel/tcg/cpu-ldst.h"
+#include "accel/tcg/helper-retaddr.h"
#include "accel/tcg/cpu-ops.h"
+#include "accel/tcg/probe.h"
#ifdef CONFIG_USER_ONLY
#include "user/page-protection.h"
#endif
@@ -119,6 +123,11 @@ static inline uint64_t expand_pred_s(uint8_t byte)
return word[byte & 0x11];
}
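+
+/* Broadcast predicate bit 0 across a 64-bit (doubleword) element. */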
+static inline uint64_t expand_pred_d(uint8_t byte)
+{
+ return -(uint64_t)(byte & 1);
+}
+
#define LOGICAL_PPPP(NAME, FUNC) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
{ \
@@ -202,6 +211,7 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
#define DO_EOR(N, M) (N ^ M)
#define DO_ORR(N, M) (N | M)
#define DO_BIC(N, M) (N & ~M)
+#define DO_ORC(N, M) (N | ~M)
#define DO_ADD(N, M) (N + M)
#define DO_SUB(N, M) (N - M)
#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
@@ -523,14 +533,9 @@ DO_ZPZZ(sve2_uhsub_zpzz_h, uint16_t, H1_2, DO_HSUB_BHS)
DO_ZPZZ(sve2_uhsub_zpzz_s, uint32_t, H1_4, DO_HSUB_BHS)
DO_ZPZZ_D(sve2_uhsub_zpzz_d, uint64_t, DO_HSUB_D)
-static inline int32_t do_sat_bhs(int64_t val, int64_t min, int64_t max)
-{
- return val >= max ? max : val <= min ? min : val;
-}
-
-#define DO_SQADD_B(n, m) do_sat_bhs((int64_t)n + m, INT8_MIN, INT8_MAX)
-#define DO_SQADD_H(n, m) do_sat_bhs((int64_t)n + m, INT16_MIN, INT16_MAX)
-#define DO_SQADD_S(n, m) do_sat_bhs((int64_t)n + m, INT32_MIN, INT32_MAX)
+#define DO_SQADD_B(n, m) do_ssat_b((int64_t)n + m)
+#define DO_SQADD_H(n, m) do_ssat_h((int64_t)n + m)
+#define DO_SQADD_S(n, m) do_ssat_s((int64_t)n + m)
static inline int64_t do_sqadd_d(int64_t n, int64_t m)
{
@@ -547,9 +552,9 @@ DO_ZPZZ(sve2_sqadd_zpzz_h, int16_t, H1_2, DO_SQADD_H)
DO_ZPZZ(sve2_sqadd_zpzz_s, int32_t, H1_4, DO_SQADD_S)
DO_ZPZZ_D(sve2_sqadd_zpzz_d, int64_t, do_sqadd_d)
-#define DO_UQADD_B(n, m) do_sat_bhs((int64_t)n + m, 0, UINT8_MAX)
-#define DO_UQADD_H(n, m) do_sat_bhs((int64_t)n + m, 0, UINT16_MAX)
-#define DO_UQADD_S(n, m) do_sat_bhs((int64_t)n + m, 0, UINT32_MAX)
+#define DO_UQADD_B(n, m) do_usat_b((int64_t)n + m)
+#define DO_UQADD_H(n, m) do_usat_h((int64_t)n + m)
+#define DO_UQADD_S(n, m) do_usat_s((int64_t)n + m)
static inline uint64_t do_uqadd_d(uint64_t n, uint64_t m)
{
@@ -562,9 +567,9 @@ DO_ZPZZ(sve2_uqadd_zpzz_h, uint16_t, H1_2, DO_UQADD_H)
DO_ZPZZ(sve2_uqadd_zpzz_s, uint32_t, H1_4, DO_UQADD_S)
DO_ZPZZ_D(sve2_uqadd_zpzz_d, uint64_t, do_uqadd_d)
-#define DO_SQSUB_B(n, m) do_sat_bhs((int64_t)n - m, INT8_MIN, INT8_MAX)
-#define DO_SQSUB_H(n, m) do_sat_bhs((int64_t)n - m, INT16_MIN, INT16_MAX)
-#define DO_SQSUB_S(n, m) do_sat_bhs((int64_t)n - m, INT32_MIN, INT32_MAX)
+#define DO_SQSUB_B(n, m) do_ssat_b((int64_t)n - m)
+#define DO_SQSUB_H(n, m) do_ssat_h((int64_t)n - m)
+#define DO_SQSUB_S(n, m) do_ssat_s((int64_t)n - m)
static inline int64_t do_sqsub_d(int64_t n, int64_t m)
{
@@ -581,9 +586,9 @@ DO_ZPZZ(sve2_sqsub_zpzz_h, int16_t, H1_2, DO_SQSUB_H)
DO_ZPZZ(sve2_sqsub_zpzz_s, int32_t, H1_4, DO_SQSUB_S)
DO_ZPZZ_D(sve2_sqsub_zpzz_d, int64_t, do_sqsub_d)
-#define DO_UQSUB_B(n, m) do_sat_bhs((int64_t)n - m, 0, UINT8_MAX)
-#define DO_UQSUB_H(n, m) do_sat_bhs((int64_t)n - m, 0, UINT16_MAX)
-#define DO_UQSUB_S(n, m) do_sat_bhs((int64_t)n - m, 0, UINT32_MAX)
+#define DO_UQSUB_B(n, m) do_usat_b((int64_t)n - m)
+#define DO_UQSUB_H(n, m) do_usat_h((int64_t)n - m)
+#define DO_UQSUB_S(n, m) do_usat_s((int64_t)n - m)
static inline uint64_t do_uqsub_d(uint64_t n, uint64_t m)
{
@@ -595,12 +600,9 @@ DO_ZPZZ(sve2_uqsub_zpzz_h, uint16_t, H1_2, DO_UQSUB_H)
DO_ZPZZ(sve2_uqsub_zpzz_s, uint32_t, H1_4, DO_UQSUB_S)
DO_ZPZZ_D(sve2_uqsub_zpzz_d, uint64_t, do_uqsub_d)
-#define DO_SUQADD_B(n, m) \
- do_sat_bhs((int64_t)(int8_t)n + m, INT8_MIN, INT8_MAX)
-#define DO_SUQADD_H(n, m) \
- do_sat_bhs((int64_t)(int16_t)n + m, INT16_MIN, INT16_MAX)
-#define DO_SUQADD_S(n, m) \
- do_sat_bhs((int64_t)(int32_t)n + m, INT32_MIN, INT32_MAX)
+#define DO_SUQADD_B(n, m) do_ssat_b((int64_t)(int8_t)n + m)
+#define DO_SUQADD_H(n, m) do_ssat_h((int64_t)(int16_t)n + m)
+#define DO_SUQADD_S(n, m) do_ssat_s((int64_t)(int32_t)n + m)
static inline int64_t do_suqadd_d(int64_t n, uint64_t m)
{
@@ -630,12 +632,9 @@ DO_ZPZZ(sve2_suqadd_zpzz_h, uint16_t, H1_2, DO_SUQADD_H)
DO_ZPZZ(sve2_suqadd_zpzz_s, uint32_t, H1_4, DO_SUQADD_S)
DO_ZPZZ_D(sve2_suqadd_zpzz_d, uint64_t, do_suqadd_d)
-#define DO_USQADD_B(n, m) \
- do_sat_bhs((int64_t)n + (int8_t)m, 0, UINT8_MAX)
-#define DO_USQADD_H(n, m) \
- do_sat_bhs((int64_t)n + (int16_t)m, 0, UINT16_MAX)
-#define DO_USQADD_S(n, m) \
- do_sat_bhs((int64_t)n + (int32_t)m, 0, UINT32_MAX)
+#define DO_USQADD_B(n, m) do_usat_b((int64_t)n + (int8_t)m)
+#define DO_USQADD_H(n, m) do_usat_h((int64_t)n + (int16_t)m)
+#define DO_USQADD_S(n, m) do_usat_s((int64_t)n + (int32_t)m)
static inline uint64_t do_usqadd_d(uint64_t n, int64_t m)
{
@@ -1222,37 +1221,29 @@ void HELPER(NAME)(void *vd, void *vn, uint32_t desc) \
} \
}
-#define DO_SQXTN_H(n) do_sat_bhs(n, INT8_MIN, INT8_MAX)
-#define DO_SQXTN_S(n) do_sat_bhs(n, INT16_MIN, INT16_MAX)
-#define DO_SQXTN_D(n) do_sat_bhs(n, INT32_MIN, INT32_MAX)
-
-DO_XTNB(sve2_sqxtnb_h, int16_t, DO_SQXTN_H)
-DO_XTNB(sve2_sqxtnb_s, int32_t, DO_SQXTN_S)
-DO_XTNB(sve2_sqxtnb_d, int64_t, DO_SQXTN_D)
+DO_XTNB(sve2_sqxtnb_h, int16_t, do_ssat_b)
+DO_XTNB(sve2_sqxtnb_s, int32_t, do_ssat_h)
+DO_XTNB(sve2_sqxtnb_d, int64_t, do_ssat_s)
-DO_XTNT(sve2_sqxtnt_h, int16_t, int8_t, H1, DO_SQXTN_H)
-DO_XTNT(sve2_sqxtnt_s, int32_t, int16_t, H1_2, DO_SQXTN_S)
-DO_XTNT(sve2_sqxtnt_d, int64_t, int32_t, H1_4, DO_SQXTN_D)
+DO_XTNT(sve2_sqxtnt_h, int16_t, int8_t, H1, do_ssat_b)
+DO_XTNT(sve2_sqxtnt_s, int32_t, int16_t, H1_2, do_ssat_h)
+DO_XTNT(sve2_sqxtnt_d, int64_t, int32_t, H1_4, do_ssat_s)
-#define DO_UQXTN_H(n) do_sat_bhs(n, 0, UINT8_MAX)
-#define DO_UQXTN_S(n) do_sat_bhs(n, 0, UINT16_MAX)
-#define DO_UQXTN_D(n) do_sat_bhs(n, 0, UINT32_MAX)
+DO_XTNB(sve2_uqxtnb_h, uint16_t, do_usat_b)
+DO_XTNB(sve2_uqxtnb_s, uint32_t, do_usat_h)
+DO_XTNB(sve2_uqxtnb_d, uint64_t, do_usat_s)
-DO_XTNB(sve2_uqxtnb_h, uint16_t, DO_UQXTN_H)
-DO_XTNB(sve2_uqxtnb_s, uint32_t, DO_UQXTN_S)
-DO_XTNB(sve2_uqxtnb_d, uint64_t, DO_UQXTN_D)
+DO_XTNT(sve2_uqxtnt_h, uint16_t, uint8_t, H1, do_usat_b)
+DO_XTNT(sve2_uqxtnt_s, uint32_t, uint16_t, H1_2, do_usat_h)
+DO_XTNT(sve2_uqxtnt_d, uint64_t, uint32_t, H1_4, do_usat_s)
-DO_XTNT(sve2_uqxtnt_h, uint16_t, uint8_t, H1, DO_UQXTN_H)
-DO_XTNT(sve2_uqxtnt_s, uint32_t, uint16_t, H1_2, DO_UQXTN_S)
-DO_XTNT(sve2_uqxtnt_d, uint64_t, uint32_t, H1_4, DO_UQXTN_D)
+DO_XTNB(sve2_sqxtunb_h, int16_t, do_usat_b)
+DO_XTNB(sve2_sqxtunb_s, int32_t, do_usat_h)
+DO_XTNB(sve2_sqxtunb_d, int64_t, do_usat_s)
-DO_XTNB(sve2_sqxtunb_h, int16_t, DO_UQXTN_H)
-DO_XTNB(sve2_sqxtunb_s, int32_t, DO_UQXTN_S)
-DO_XTNB(sve2_sqxtunb_d, int64_t, DO_UQXTN_D)
-
-DO_XTNT(sve2_sqxtunt_h, int16_t, int8_t, H1, DO_UQXTN_H)
-DO_XTNT(sve2_sqxtunt_s, int32_t, int16_t, H1_2, DO_UQXTN_S)
-DO_XTNT(sve2_sqxtunt_d, int64_t, int32_t, H1_4, DO_UQXTN_D)
+DO_XTNT(sve2_sqxtunt_h, int16_t, int8_t, H1, do_usat_b)
+DO_XTNT(sve2_sqxtunt_s, int32_t, int16_t, H1_2, do_usat_h)
+DO_XTNT(sve2_sqxtunt_d, int64_t, int32_t, H1_4, do_usat_s)
#undef DO_XTNB
#undef DO_XTNT
@@ -1829,6 +1820,52 @@ DO_VPZ_D(sve_uminv_d, uint64_t, uint64_t, -1, DO_MIN)
#undef DO_VPZ
#undef DO_VPZ_D
+#define DO_VPQ(NAME, TYPE, H, INIT, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
+{ \
+ TYPE tmp[16 / sizeof(TYPE)] = { [0 ... 16 / sizeof(TYPE) - 1] = INIT }; \
+ TYPE *n = vn; uint16_t *g = vg; \
+ uintptr_t oprsz = simd_oprsz(desc); \
+ uintptr_t nseg = oprsz / 16, nsegelt = 16 / sizeof(TYPE); \
+ for (uintptr_t s = 0; s < nseg; s++) { \
+ uint16_t pg = g[H2(s)]; \
+ for (uintptr_t e = 0; e < nsegelt; e++, pg >>= sizeof(TYPE)) { \
+ if (pg & 1) { \
+ tmp[e] = OP(tmp[e], n[s * nsegelt + H(e)]); \
+ } \
+ } \
+ } \
+ memcpy(vd, tmp, 16); \
+ clear_tail(vd, 16, simd_maxsz(desc)); \
+}
+
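+/*
+ * Illustrative example: for ADDQV with 32-bit elements and a 256-bit
+ * vector (two 16-byte segments), each result lane accumulates
+ *   d[e] = n[e] + n[e + 4]
+ * over the active elements, i.e. the reduction runs across segments
+ * for each lane of the quadword.
+ */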
+DO_VPQ(sve2p1_addqv_b, uint8_t, H1, 0, DO_ADD)
+DO_VPQ(sve2p1_addqv_h, uint16_t, H2, 0, DO_ADD)
+DO_VPQ(sve2p1_addqv_s, uint32_t, H4, 0, DO_ADD)
+DO_VPQ(sve2p1_addqv_d, uint64_t, H8, 0, DO_ADD)
+
+DO_VPQ(sve2p1_smaxqv_b, int8_t, H1, INT8_MIN, DO_MAX)
+DO_VPQ(sve2p1_smaxqv_h, int16_t, H2, INT16_MIN, DO_MAX)
+DO_VPQ(sve2p1_smaxqv_s, int32_t, H4, INT32_MIN, DO_MAX)
+DO_VPQ(sve2p1_smaxqv_d, int64_t, H8, INT64_MIN, DO_MAX)
+
+DO_VPQ(sve2p1_sminqv_b, int8_t, H1, INT8_MAX, DO_MIN)
+DO_VPQ(sve2p1_sminqv_h, int16_t, H2, INT16_MAX, DO_MIN)
+DO_VPQ(sve2p1_sminqv_s, int32_t, H4, INT32_MAX, DO_MIN)
+DO_VPQ(sve2p1_sminqv_d, int64_t, H8, INT64_MAX, DO_MIN)
+
+DO_VPQ(sve2p1_umaxqv_b, uint8_t, H1, 0, DO_MAX)
+DO_VPQ(sve2p1_umaxqv_h, uint16_t, H2, 0, DO_MAX)
+DO_VPQ(sve2p1_umaxqv_s, uint32_t, H4, 0, DO_MAX)
+DO_VPQ(sve2p1_umaxqv_d, uint64_t, H8, 0, DO_MAX)
+
+DO_VPQ(sve2p1_uminqv_b, uint8_t, H1, -1, DO_MIN)
+DO_VPQ(sve2p1_uminqv_h, uint16_t, H2, -1, DO_MIN)
+DO_VPQ(sve2p1_uminqv_s, uint32_t, H4, -1, DO_MIN)
+DO_VPQ(sve2p1_uminqv_d, uint64_t, H8, -1, DO_MIN)
+
+#undef DO_VPQ
+
/* Two vector operand, one scalar operand, unpredicated. */
#define DO_ZZI(NAME, TYPE, OP) \
void HELPER(NAME)(void *vd, void *vn, uint64_t s64, uint32_t desc) \
@@ -1869,10 +1906,46 @@ DO_ZZI(sve_umini_d, uint64_t, DO_MIN)
#undef DO_ZZI
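+
+/*
+ * Logical reduction across quadword segments.  Inactive lanes are
+ * first forced to the identity element via POP: ANDing with the
+ * expanded predicate yields 0 (identity for ORR/EOR), while ORing
+ * with its complement yields all-ones (identity for AND).
+ */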
+#define DO_LOGIC_QV(NAME, SUFF, INIT, VOP, POP) \
+void HELPER(NAME ## _ ## SUFF)(void *vd, void *vn, void *vg, uint32_t desc) \
+{ \
+ unsigned seg = simd_oprsz(desc) / 16; \
+ uint64_t r0 = INIT, r1 = INIT; \
+ for (unsigned s = 0; s < seg; s++) { \
+ uint64_t p0 = expand_pred_##SUFF(*(uint8_t *)(vg + H1(s * 2))); \
+ uint64_t p1 = expand_pred_##SUFF(*(uint8_t *)(vg + H1(s * 2 + 1))); \
+ uint64_t v0 = *(uint64_t *)(vn + s * 16); \
+ uint64_t v1 = *(uint64_t *)(vn + s * 16 + 8); \
+ v0 = POP(v0, p0), v1 = POP(v1, p1); \
+ r0 = VOP(r0, v0), r1 = VOP(r1, v1); \
+ } \
+ *(uint64_t *)(vd + 0) = r0; \
+ *(uint64_t *)(vd + 8) = r1; \
+ clear_tail(vd, 16, simd_maxsz(desc)); \
+}
+
+DO_LOGIC_QV(sve2p1_orqv, b, 0, DO_ORR, DO_AND)
+DO_LOGIC_QV(sve2p1_orqv, h, 0, DO_ORR, DO_AND)
+DO_LOGIC_QV(sve2p1_orqv, s, 0, DO_ORR, DO_AND)
+DO_LOGIC_QV(sve2p1_orqv, d, 0, DO_ORR, DO_AND)
+
+DO_LOGIC_QV(sve2p1_eorqv, b, 0, DO_EOR, DO_AND)
+DO_LOGIC_QV(sve2p1_eorqv, h, 0, DO_EOR, DO_AND)
+DO_LOGIC_QV(sve2p1_eorqv, s, 0, DO_EOR, DO_AND)
+DO_LOGIC_QV(sve2p1_eorqv, d, 0, DO_EOR, DO_AND)
+
+DO_LOGIC_QV(sve2p1_andqv, b, -1, DO_AND, DO_ORC)
+DO_LOGIC_QV(sve2p1_andqv, h, -1, DO_AND, DO_ORC)
+DO_LOGIC_QV(sve2p1_andqv, s, -1, DO_AND, DO_ORC)
+DO_LOGIC_QV(sve2p1_andqv, d, -1, DO_AND, DO_ORC)
+
+#undef DO_LOGIC_QV
+
#undef DO_AND
#undef DO_ORR
#undef DO_EOR
#undef DO_BIC
+#undef DO_ORC
#undef DO_ADD
#undef DO_SUB
#undef DO_MAX
@@ -2065,27 +2138,6 @@ void HELPER(NAME)(void *vd, void *vn, void *vg, uint32_t desc) \
when N is negative, add 2**M-1. */
#define DO_ASRD(N, M) ((N + (N < 0 ? ((__typeof(N))1 << M) - 1 : 0)) >> M)
-static inline uint64_t do_urshr(uint64_t x, unsigned sh)
-{
- if (likely(sh < 64)) {
- return (x >> sh) + ((x >> (sh - 1)) & 1);
- } else if (sh == 64) {
- return x >> 63;
- } else {
- return 0;
- }
-}
-
-static inline int64_t do_srshr(int64_t x, unsigned sh)
-{
- if (likely(sh < 64)) {
- return (x >> sh) + ((x >> (sh - 1)) & 1);
- } else {
- /* Rounding the sign bit always produces 0. */
- return 0;
- }
-}
-
DO_ZPZI(sve_asr_zpzi_b, int8_t, H1, DO_SHR)
DO_ZPZI(sve_asr_zpzi_h, int16_t, H1_2, DO_SHR)
DO_ZPZI(sve_asr_zpzi_s, int32_t, H1_4, DO_SHR)
@@ -2183,10 +2235,9 @@ DO_SHRNT(sve2_rshrnt_h, uint16_t, uint8_t, H1_2, H1, do_urshr)
DO_SHRNT(sve2_rshrnt_s, uint32_t, uint16_t, H1_4, H1_2, do_urshr)
DO_SHRNT(sve2_rshrnt_d, uint64_t, uint32_t, H1_8, H1_4, do_urshr)
-#define DO_SQSHRUN_H(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT8_MAX)
-#define DO_SQSHRUN_S(x, sh) do_sat_bhs((int64_t)(x) >> sh, 0, UINT16_MAX)
-#define DO_SQSHRUN_D(x, sh) \
- do_sat_bhs((int64_t)(x) >> (sh < 64 ? sh : 63), 0, UINT32_MAX)
+#define DO_SQSHRUN_H(x, sh) do_usat_b((int64_t)(x) >> sh)
+#define DO_SQSHRUN_S(x, sh) do_usat_h((int64_t)(x) >> sh)
+#define DO_SQSHRUN_D(x, sh) do_usat_s((int64_t)(x) >> (sh < 64 ? sh : 63))
DO_SHRNB(sve2_sqshrunb_h, int16_t, uint8_t, DO_SQSHRUN_H)
DO_SHRNB(sve2_sqshrunb_s, int32_t, uint16_t, DO_SQSHRUN_S)
@@ -2196,9 +2247,9 @@ DO_SHRNT(sve2_sqshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRUN_H)
DO_SHRNT(sve2_sqshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRUN_S)
DO_SHRNT(sve2_sqshrunt_d, int64_t, uint32_t, H1_8, H1_4, DO_SQSHRUN_D)
-#define DO_SQRSHRUN_H(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT8_MAX)
-#define DO_SQRSHRUN_S(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT16_MAX)
-#define DO_SQRSHRUN_D(x, sh) do_sat_bhs(do_srshr(x, sh), 0, UINT32_MAX)
+#define DO_SQRSHRUN_H(x, sh) do_usat_b(do_srshr(x, sh))
+#define DO_SQRSHRUN_S(x, sh) do_usat_h(do_srshr(x, sh))
+#define DO_SQRSHRUN_D(x, sh) do_usat_s(do_srshr(x, sh))
DO_SHRNB(sve2_sqrshrunb_h, int16_t, uint8_t, DO_SQRSHRUN_H)
DO_SHRNB(sve2_sqrshrunb_s, int32_t, uint16_t, DO_SQRSHRUN_S)
@@ -2208,9 +2259,9 @@ DO_SHRNT(sve2_sqrshrunt_h, int16_t, uint8_t, H1_2, H1, DO_SQRSHRUN_H)
DO_SHRNT(sve2_sqrshrunt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQRSHRUN_S)
DO_SHRNT(sve2_sqrshrunt_d, int64_t, uint32_t, H1_8, H1_4, DO_SQRSHRUN_D)
-#define DO_SQSHRN_H(x, sh) do_sat_bhs(x >> sh, INT8_MIN, INT8_MAX)
-#define DO_SQSHRN_S(x, sh) do_sat_bhs(x >> sh, INT16_MIN, INT16_MAX)
-#define DO_SQSHRN_D(x, sh) do_sat_bhs(x >> sh, INT32_MIN, INT32_MAX)
+#define DO_SQSHRN_H(x, sh) do_ssat_b(x >> sh)
+#define DO_SQSHRN_S(x, sh) do_ssat_h(x >> sh)
+#define DO_SQSHRN_D(x, sh) do_ssat_s(x >> sh)
DO_SHRNB(sve2_sqshrnb_h, int16_t, uint8_t, DO_SQSHRN_H)
DO_SHRNB(sve2_sqshrnb_s, int32_t, uint16_t, DO_SQSHRN_S)
@@ -2220,9 +2271,9 @@ DO_SHRNT(sve2_sqshrnt_h, int16_t, uint8_t, H1_2, H1, DO_SQSHRN_H)
DO_SHRNT(sve2_sqshrnt_s, int32_t, uint16_t, H1_4, H1_2, DO_SQSHRN_S)
DO_SHRNT(sve2_sqshrnt_d, int64_t, uint32_t, H1_8, H1_4, DO_SQSHRN_D)
-#define DO_SQRSHRN_H(x, sh) do_sat_bhs(do_srshr(x, sh), INT8_MIN, INT8_MAX)
-#define DO_SQRSHRN_S(x, sh) do_sat_bhs(do_srshr(x, sh), INT16_MIN, INT16_MAX)
-#define DO_SQRSHRN_D(x, sh) do_sat_bhs(do_srshr(x, sh), INT32_MIN, INT32_MAX)
+#define DO_SQRSHRN_H(x, sh) do_ssat_b(do_srshr(x, sh))
+#define DO_SQRSHRN_S(x, sh) do_ssat_h(do_srshr(x, sh))
+#define DO_SQRSHRN_D(x, sh) do_ssat_s(do_srshr(x, sh))
DO_SHRNB(sve2_sqrshrnb_h, int16_t, uint8_t, DO_SQRSHRN_H)
DO_SHRNB(sve2_sqrshrnb_s, int32_t, uint16_t, DO_SQRSHRN_S)
@@ -2984,6 +3035,56 @@ void HELPER(sve_rev_d)(void *vd, void *vn, uint32_t desc)
}
}
+/*
+ * TODO: This could use half_shuffle64 and similar bit tricks to
+ * expand blocks of bits at once.
+ */
+#define DO_PMOV_PV(NAME, ESIZE) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ unsigned vl = simd_oprsz(desc); \
+ unsigned idx = simd_data(desc); \
+ unsigned elements = vl / ESIZE; \
+ ARMPredicateReg *d = vd; \
+ ARMVectorReg *s = vs; \
+ memset(d, 0, sizeof(*d)); \
+ for (unsigned e = 0; e < elements; ++e) { \
+ depositn(d->p, e * ESIZE, 1, extractn(s->d, elements * idx + e, 1)); \
+ } \
+}
+
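+/*
+ * For example, with a 32-byte vector and .D elements there are four
+ * elements, so part IDX reads vector bits [IDX*4+3 : IDX*4] and
+ * spreads them to predicate bits 0, 8, 16 and 24.
+ */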
+DO_PMOV_PV(pmov_pv_h, 2)
+DO_PMOV_PV(pmov_pv_s, 4)
+DO_PMOV_PV(pmov_pv_d, 8)
+
+#undef DO_PMOV_PV
+
+/*
+ * TODO: This could use half_unshuffle64 and similar bit tricks to
+ * compress blocks of bits at once.
+ */
+#define DO_PMOV_VP(NAME, ESIZE) \
+void HELPER(NAME)(void *vd, void *vs, uint32_t desc) \
+{ \
+ unsigned vl = simd_oprsz(desc); \
+ unsigned idx = simd_data(desc); \
+ unsigned elements = vl / ESIZE; \
+ ARMVectorReg *d = vd; \
+ ARMPredicateReg *s = vs; \
+ if (idx == 0) { \
+ memset(d, 0, vl); \
+ } \
+ for (unsigned e = 0; e < elements; ++e) { \
+ depositn(d->d, elements * idx + e, 1, extractn(s->p, e * ESIZE, 1)); \
+ } \
+}
+
+DO_PMOV_VP(pmov_vp_h, 2)
+DO_PMOV_VP(pmov_vp_s, 4)
+DO_PMOV_VP(pmov_vp_d, 8)
+
+#undef DO_PMOV_VP
+
typedef void tb_impl_fn(void *, void *, void *, void *, uintptr_t, bool);
static inline void do_tbl1(void *vd, void *vn, void *vm, uint32_t desc,
@@ -3449,6 +3550,45 @@ DO_UZP(sve_uzp_s, uint32_t, H1_4)
DO_UZP(sve_uzp_d, uint64_t, H1_8)
DO_UZP(sve2_uzp_q, Int128, )
+typedef void perseg_zzz_fn(void *vd, void *vn, void *vm, uint32_t desc);
+
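+/*
+ * Apply a full-vector permute helper independently to each 16-byte
+ * segment, by re-describing the operation as a 16-byte one.
+ */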
+static void do_perseg_zzz(void *vd, void *vn, void *vm,
+ uint32_t desc, perseg_zzz_fn *fn)
+{
+ intptr_t oprsz = simd_oprsz(desc);
+
+ desc = simd_desc(16, 16, simd_data(desc));
+ for (intptr_t i = 0; i < oprsz; i += 16) {
+ fn(vd + i, vn + i, vm + i, desc);
+ }
+}
+
+#define DO_PERSEG_ZZZ(NAME, FUNC) \
+ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
+ { do_perseg_zzz(vd, vn, vm, desc, FUNC); }
+
+DO_PERSEG_ZZZ(sve2p1_uzpq_b, helper_sve_uzp_b)
+DO_PERSEG_ZZZ(sve2p1_uzpq_h, helper_sve_uzp_h)
+DO_PERSEG_ZZZ(sve2p1_uzpq_s, helper_sve_uzp_s)
+DO_PERSEG_ZZZ(sve2p1_uzpq_d, helper_sve_uzp_d)
+
+DO_PERSEG_ZZZ(sve2p1_zipq_b, helper_sve_zip_b)
+DO_PERSEG_ZZZ(sve2p1_zipq_h, helper_sve_zip_h)
+DO_PERSEG_ZZZ(sve2p1_zipq_s, helper_sve_zip_s)
+DO_PERSEG_ZZZ(sve2p1_zipq_d, helper_sve_zip_d)
+
+DO_PERSEG_ZZZ(sve2p1_tblq_b, helper_sve_tbl_b)
+DO_PERSEG_ZZZ(sve2p1_tblq_h, helper_sve_tbl_h)
+DO_PERSEG_ZZZ(sve2p1_tblq_s, helper_sve_tbl_s)
+DO_PERSEG_ZZZ(sve2p1_tblq_d, helper_sve_tbl_d)
+
+DO_PERSEG_ZZZ(sve2p1_tbxq_b, helper_sve2_tbx_b)
+DO_PERSEG_ZZZ(sve2p1_tbxq_h, helper_sve2_tbx_h)
+DO_PERSEG_ZZZ(sve2p1_tbxq_s, helper_sve2_tbx_s)
+DO_PERSEG_ZZZ(sve2p1_tbxq_d, helper_sve2_tbx_d)
+
+#undef DO_PERSEG_ZZZ
+
#define DO_TRN(NAME, TYPE, H) \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
{ \
@@ -3989,15 +4129,6 @@ static uint32_t compute_brks_m(uint64_t *d, uint64_t *n, uint64_t *g,
return flags;
}
-static uint32_t do_zero(ARMPredicateReg *d, intptr_t oprsz)
-{
- /* It is quicker to zero the whole predicate than loop on OPRSZ.
- * The compiler should turn this into 4 64-bit integer stores.
- */
- memset(d, 0, sizeof(ARMPredicateReg));
- return PREDTEST_INIT;
-}
-
void HELPER(sve_brkpa)(void *vd, void *vn, void *vm, void *vg,
uint32_t pred_desc)
{
@@ -4005,7 +4136,7 @@ void HELPER(sve_brkpa)(void *vd, void *vn, void *vm, void *vg,
if (last_active_pred(vn, vg, oprsz)) {
compute_brk_z(vd, vm, vg, oprsz, true);
} else {
- do_zero(vd, oprsz);
+ memset(vd, 0, sizeof(ARMPredicateReg));
}
}
@@ -4016,7 +4147,8 @@ uint32_t HELPER(sve_brkpas)(void *vd, void *vn, void *vm, void *vg,
if (last_active_pred(vn, vg, oprsz)) {
return compute_brks_z(vd, vm, vg, oprsz, true);
} else {
- return do_zero(vd, oprsz);
+ memset(vd, 0, sizeof(ARMPredicateReg));
+ return PREDTEST_INIT;
}
}
@@ -4027,7 +4159,7 @@ void HELPER(sve_brkpb)(void *vd, void *vn, void *vm, void *vg,
if (last_active_pred(vn, vg, oprsz)) {
compute_brk_z(vd, vm, vg, oprsz, false);
} else {
- do_zero(vd, oprsz);
+ memset(vd, 0, sizeof(ARMPredicateReg));
}
}
@@ -4038,7 +4170,8 @@ uint32_t HELPER(sve_brkpbs)(void *vd, void *vn, void *vm, void *vg,
if (last_active_pred(vn, vg, oprsz)) {
return compute_brks_z(vd, vm, vg, oprsz, false);
} else {
- return do_zero(vd, oprsz);
+ memset(vd, 0, sizeof(ARMPredicateReg));
+ return PREDTEST_INIT;
}
}
@@ -4094,35 +4227,30 @@ void HELPER(sve_brkn)(void *vd, void *vn, void *vg, uint32_t pred_desc)
{
intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
if (!last_active_pred(vn, vg, oprsz)) {
- do_zero(vd, oprsz);
- }
-}
-
-/* As if PredTest(Ones(PL), D, esz). */
-static uint32_t predtest_ones(ARMPredicateReg *d, intptr_t oprsz,
- uint64_t esz_mask)
-{
- uint32_t flags = PREDTEST_INIT;
- intptr_t i;
-
- for (i = 0; i < oprsz / 8; i++) {
- flags = iter_predtest_fwd(d->p[i], esz_mask, flags);
- }
- if (oprsz & 7) {
- uint64_t mask = ~(-1ULL << (8 * (oprsz & 7)));
- flags = iter_predtest_fwd(d->p[i], esz_mask & mask, flags);
+ memset(vd, 0, sizeof(ARMPredicateReg));
}
- return flags;
}
uint32_t HELPER(sve_brkns)(void *vd, void *vn, void *vg, uint32_t pred_desc)
{
intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
if (last_active_pred(vn, vg, oprsz)) {
- return predtest_ones(vd, oprsz, -1);
- } else {
- return do_zero(vd, oprsz);
+ ARMPredicateReg *d = vd;
+ uint32_t flags = PREDTEST_INIT;
+ intptr_t i;
+
+ /* As if PredTest(Ones(PL), D, MO_8). */
+ for (i = 0; i < oprsz / 8; i++) {
+ flags = iter_predtest_fwd(d->p[i], -1, flags);
+ }
+ if (oprsz & 7) {
+ uint64_t mask = ~(-1ULL << (8 * (oprsz & 7)));
+ flags = iter_predtest_fwd(d->p[i], mask, flags);
+ }
+ return flags;
}
+ memset(vd, 0, sizeof(ARMPredicateReg));
+ return PREDTEST_INIT;
}
uint64_t HELPER(sve_cntp)(void *vn, void *vg, uint32_t pred_desc)
@@ -4139,66 +4267,200 @@ uint64_t HELPER(sve_cntp)(void *vn, void *vg, uint32_t pred_desc)
return sum;
}
-uint32_t HELPER(sve_whilel)(void *vd, uint32_t count, uint32_t pred_desc)
+uint64_t HELPER(sve2p1_cntp_c)(uint32_t png, uint32_t desc)
{
- intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
- intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
- uint64_t esz_mask = pred_esz_masks[esz];
- ARMPredicateReg *d = vd;
- uint32_t flags;
- intptr_t i;
+ int pl = FIELD_EX32(desc, PREDDESC, OPRSZ);
+ int vl = pl * 8;
+ unsigned v_esz = FIELD_EX32(desc, PREDDESC, ESZ);
+ int lg2_width = FIELD_EX32(desc, PREDDESC, DATA) + 1;
+ DecodeCounter p = decode_counter(png, vl, v_esz);
+ unsigned maxelem = (vl << lg2_width) >> v_esz;
+ unsigned count = p.count;
+
+ if (p.invert) {
+ if (count >= maxelem) {
+ return 0;
+ }
+ count = maxelem - count;
+ } else {
+ count = MIN(count, maxelem);
+ }
+ return count >> p.lg2_stride;
+}
+
+/* C.f. Arm pseudocode EncodePredCount */
+static uint64_t encode_pred_count(uint32_t elements, uint32_t count,
+ uint32_t esz, bool invert)
+{
+ uint32_t pred;
- /* Begin with a zero predicate register. */
- flags = do_zero(d, oprsz);
if (count == 0) {
- return flags;
+ return 0;
+ }
+ if (invert) {
+ count = elements - count;
+ } else if (count == elements) {
+ count = 0;
+ invert = true;
}
- /* Set all of the requested bits. */
- for (i = 0; i < count / 64; ++i) {
- d->p[i] = esz_mask;
+ pred = (count << 1) | 1;
+ pred <<= esz;
+ pred |= invert << 15;
+
+ return pred;
+}
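+
+/*
+ * For example, per the encoding above: esz == MO_8, count == 3,
+ * invert == false yields 0b0111 -- the lowest set bit encodes the
+ * element size, the bits above it the count, and bit 15 the invert.
+ */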
+
+/* C.f. Arm pseudocode PredCountTest */
+static uint32_t pred_count_test(uint32_t elements, uint32_t count, bool invert)
+{
+ uint32_t flags;
+
+ if (count == 0) {
+ flags = 1; /* !N, Z, C */
+ } else if (!invert) {
+ flags = (1u << 31) | 2; /* N, !Z */
+ flags |= count != elements; /* C */
+ } else {
+ flags = 2; /* !Z, !C */
+ flags |= (count == elements) << 31; /* N */
}
- if (count & 63) {
- d->p[i] = MAKE_64BIT_MASK(0, count & 63) & esz_mask;
+ return flags;
+}
+
+/* D must be cleared on entry. */
+static void do_whilel(ARMPredicateReg *d, uint64_t esz_mask,
+ uint32_t count, uint32_t oprbits)
+{
+ tcg_debug_assert(count <= oprbits);
+ if (count) {
+ uint32_t i;
+
+ /* Set all of the requested bits. */
+ for (i = 0; i < count / 64; ++i) {
+ d->p[i] = esz_mask;
+ }
+ if (count & 63) {
+ d->p[i] = MAKE_64BIT_MASK(0, count & 63) & esz_mask;
+ }
}
+}
- return predtest_ones(d, oprsz, esz_mask);
+uint32_t HELPER(sve_whilel)(void *vd, uint32_t count, uint32_t pred_desc)
+{
+ uint32_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ uint32_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ uint32_t oprbits = oprsz * 8;
+ uint64_t esz_mask = pred_esz_masks[esz];
+ ARMPredicateReg *d = vd;
+
+ count <<= esz;
+ memset(d, 0, sizeof(*d));
+ do_whilel(d, esz_mask, count, oprbits);
+ return pred_count_test(oprbits, count, false);
}
-uint32_t HELPER(sve_whileg)(void *vd, uint32_t count, uint32_t pred_desc)
+uint32_t HELPER(sve_while2l)(void *vd, uint32_t count, uint32_t pred_desc)
{
- intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
- intptr_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ uint32_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ uint32_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ uint32_t oprbits = oprsz * 8;
uint64_t esz_mask = pred_esz_masks[esz];
ARMPredicateReg *d = vd;
- intptr_t i, invcount, oprbits;
- uint64_t bits;
- if (count == 0) {
- return do_zero(d, oprsz);
+ count <<= esz;
+ memset(d, 0, 2 * sizeof(*d));
+ if (count <= oprbits) {
+ do_whilel(&d[0], esz_mask, count, oprbits);
+ } else {
+ do_whilel(&d[0], esz_mask, oprbits, oprbits);
+ do_whilel(&d[1], esz_mask, count - oprbits, oprbits);
}
- oprbits = oprsz * 8;
+ return pred_count_test(2 * oprbits, count, false);
+}
+
+uint32_t HELPER(sve_whilecl)(void *vd, uint32_t count, uint32_t pred_desc)
+{
+ uint32_t pl = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ uint32_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ uint32_t scale = FIELD_EX32(pred_desc, PREDDESC, DATA);
+ uint32_t vl = pl * 8;
+ uint32_t elements = (vl >> esz) << scale;
+ ARMPredicateReg *d = vd;
+
+ *d = (ARMPredicateReg) {
+ .p[0] = encode_pred_count(elements, count, esz, false)
+ };
+ return pred_count_test(elements, count, false);
+}
+
+/* D must be cleared on entry. */
+static void do_whileg(ARMPredicateReg *d, uint64_t esz_mask,
+ uint32_t count, uint32_t oprbits)
+{
tcg_debug_assert(count <= oprbits);
+ if (count) {
+ uint32_t i, invcount = oprbits - count;
+ uint64_t bits = esz_mask & MAKE_64BIT_MASK(invcount & 63, 64);
- bits = esz_mask;
- if (oprbits & 63) {
- bits &= MAKE_64BIT_MASK(0, oprbits & 63);
+ for (i = invcount / 64; i < oprbits / 64; ++i) {
+ d->p[i] = bits;
+ bits = esz_mask;
+ }
+ if (oprbits & 63) {
+ d->p[i] = bits & MAKE_64BIT_MASK(0, oprbits & 63);
+ }
}
+}
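do_whileg fills from the top instead; with oprbits = 64, count = 10, and the MO_8 mask, invcount = 54 and only the 10 highest predicate bits are set:

    #include <assert.h>
    #include <stdint.h>

    static void whileg_example(void)
    {
        uint64_t p0 = ~0ULL << (64 - 10);   /* bits 54..63 */
        assert(p0 == 0xffc0000000000000ULL);
    }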
- invcount = oprbits - count;
- for (i = (oprsz - 1) / 8; i > invcount / 64; --i) {
- d->p[i] = bits;
- bits = esz_mask;
- }
+uint32_t HELPER(sve_whileg)(void *vd, uint32_t count, uint32_t pred_desc)
+{
+ uint32_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ uint32_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ uint32_t oprbits = oprsz * 8;
+ uint64_t esz_mask = pred_esz_masks[esz];
+ ARMPredicateReg *d = vd;
- d->p[i] = bits & MAKE_64BIT_MASK(invcount & 63, 64);
+ count <<= esz;
+ memset(d, 0, sizeof(*d));
+ do_whileg(d, esz_mask, count, oprbits);
+ return pred_count_test(oprbits, count, true);
+}
+
+uint32_t HELPER(sve_while2g)(void *vd, uint32_t count, uint32_t pred_desc)
+{
+ uint32_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ uint32_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ uint32_t oprbits = oprsz * 8;
+ uint64_t esz_mask = pred_esz_masks[esz];
+ ARMPredicateReg *d = vd;
- while (--i >= 0) {
- d->p[i] = 0;
+ count <<= esz;
+ memset(d, 0, 2 * sizeof(*d));
+ if (count <= oprbits) {
+ do_whileg(&d[1], esz_mask, count, oprbits);
+ } else {
+ do_whilel(&d[1], esz_mask, oprbits, oprbits);
+ do_whileg(&d[0], esz_mask, count - oprbits, oprbits);
}
- return predtest_ones(d, oprsz, esz_mask);
+ return pred_count_test(2 * oprbits, count, true);
+}
+
+uint32_t HELPER(sve_whilecg)(void *vd, uint32_t count, uint32_t pred_desc)
+{
+ uint32_t pl = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
+ uint32_t esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
+ uint32_t scale = FIELD_EX32(pred_desc, PREDDESC, DATA);
+ uint32_t vl = pl * 8;
+ uint32_t elements = (vl >> esz) << scale;
+ ARMPredicateReg *d = vd;
+
+ *d = (ARMPredicateReg) {
+ .p[0] = encode_pred_count(elements, count, esz, true)
+ };
+ return pred_count_test(elements, count, true);
}
/* Recursive reduction on a function;
@@ -4209,19 +4471,20 @@ uint32_t HELPER(sve_whileg)(void *vd, uint32_t count, uint32_t pred_desc)
* The recursion is bounded to depth 7 (128 fp16 elements), so there's
* little to gain with a more complex non-recursive form.
*/
-#define DO_REDUCE(NAME, TYPE, H, FUNC, IDENT) \
-static TYPE NAME##_reduce(TYPE *data, float_status *status, uintptr_t n) \
+#define DO_REDUCE(NAME, SUF, TYPE, H, FUNC, IDENT) \
+static TYPE FUNC##_reduce(TYPE *data, float_status *status, uintptr_t n) \
{ \
if (n == 1) { \
return *data; \
} else { \
uintptr_t half = n / 2; \
- TYPE lo = NAME##_reduce(data, status, half); \
- TYPE hi = NAME##_reduce(data + half, status, half); \
+ TYPE lo = FUNC##_reduce(data, status, half); \
+ TYPE hi = FUNC##_reduce(data + half, status, half); \
return FUNC(lo, hi, status); \
} \
} \
-uint64_t HELPER(NAME)(void *vn, void *vg, float_status *s, uint32_t desc) \
+uint64_t helper_sve_##NAME##v_##SUF(void *vn, void *vg, \
+ float_status *s, uint32_t desc) \
{ \
uintptr_t i, oprsz = simd_oprsz(desc), maxsz = simd_data(desc); \
TYPE data[sizeof(ARMVectorReg) / sizeof(TYPE)]; \
@@ -4236,39 +4499,54 @@ uint64_t HELPER(NAME)(void *vn, void *vg, float_status *s, uint32_t desc) \
for (; i < maxsz; i += sizeof(TYPE)) { \
*(TYPE *)((void *)data + i) = IDENT; \
} \
- return NAME##_reduce(data, s, maxsz / sizeof(TYPE)); \
+ return FUNC##_reduce(data, s, maxsz / sizeof(TYPE)); \
+} \
+void helper_sve2p1_##NAME##qv_##SUF(void *vd, void *vn, void *vg, \
+ float_status *status, uint32_t desc) \
+{ \
+ unsigned oprsz = simd_oprsz(desc), segments = oprsz / 16; \
+ for (unsigned e = 0; e < 16; e += sizeof(TYPE)) { \
+ TYPE data[ARM_MAX_VQ]; \
+ for (unsigned s = 0; s < segments; s++) { \
+ uint16_t pg = *(uint16_t *)(vg + H1_2(s * 2)); \
+ TYPE nn = *(TYPE *)(vn + H(s * 16 + e)); \

+ data[s] = (pg >> e) & 1 ? nn : IDENT; \
+ } \
+ *(TYPE *)(vd + H(e)) = FUNC##_reduce(data, status, segments); \
+ } \
+ clear_tail(vd, 16, simd_maxsz(desc)); \
}
-DO_REDUCE(sve_faddv_h, float16, H1_2, float16_add, float16_zero)
-DO_REDUCE(sve_faddv_s, float32, H1_4, float32_add, float32_zero)
-DO_REDUCE(sve_faddv_d, float64, H1_8, float64_add, float64_zero)
+DO_REDUCE(fadd,h, float16, H1_2, float16_add, float16_zero)
+DO_REDUCE(fadd,s, float32, H1_4, float32_add, float32_zero)
+DO_REDUCE(fadd,d, float64, H1_8, float64_add, float64_zero)
/* Identity is floatN_default_nan, without the function call. */
-DO_REDUCE(sve_fminnmv_h, float16, H1_2, float16_minnum, 0x7E00)
-DO_REDUCE(sve_fminnmv_s, float32, H1_4, float32_minnum, 0x7FC00000)
-DO_REDUCE(sve_fminnmv_d, float64, H1_8, float64_minnum, 0x7FF8000000000000ULL)
+DO_REDUCE(fminnm,h, float16, H1_2, float16_minnum, 0x7E00)
+DO_REDUCE(fminnm,s, float32, H1_4, float32_minnum, 0x7FC00000)
+DO_REDUCE(fminnm,d, float64, H1_8, float64_minnum, 0x7FF8000000000000ULL)
-DO_REDUCE(sve_fmaxnmv_h, float16, H1_2, float16_maxnum, 0x7E00)
-DO_REDUCE(sve_fmaxnmv_s, float32, H1_4, float32_maxnum, 0x7FC00000)
-DO_REDUCE(sve_fmaxnmv_d, float64, H1_8, float64_maxnum, 0x7FF8000000000000ULL)
+DO_REDUCE(fmaxnm,h, float16, H1_2, float16_maxnum, 0x7E00)
+DO_REDUCE(fmaxnm,s, float32, H1_4, float32_maxnum, 0x7FC00000)
+DO_REDUCE(fmaxnm,d, float64, H1_8, float64_maxnum, 0x7FF8000000000000ULL)
-DO_REDUCE(sve_fminv_h, float16, H1_2, float16_min, float16_infinity)
-DO_REDUCE(sve_fminv_s, float32, H1_4, float32_min, float32_infinity)
-DO_REDUCE(sve_fminv_d, float64, H1_8, float64_min, float64_infinity)
+DO_REDUCE(fmin,h, float16, H1_2, float16_min, float16_infinity)
+DO_REDUCE(fmin,s, float32, H1_4, float32_min, float32_infinity)
+DO_REDUCE(fmin,d, float64, H1_8, float64_min, float64_infinity)
-DO_REDUCE(sve_fmaxv_h, float16, H1_2, float16_max, float16_chs(float16_infinity))
-DO_REDUCE(sve_fmaxv_s, float32, H1_4, float32_max, float32_chs(float32_infinity))
-DO_REDUCE(sve_fmaxv_d, float64, H1_8, float64_max, float64_chs(float64_infinity))
+DO_REDUCE(fmax,h, float16, H1_2, float16_max, float16_chs(float16_infinity))
+DO_REDUCE(fmax,s, float32, H1_4, float32_max, float32_chs(float32_infinity))
+DO_REDUCE(fmax,d, float64, H1_8, float64_max, float64_chs(float64_infinity))
-DO_REDUCE(sve_ah_fminv_h, float16, H1_2, helper_vfp_ah_minh, float16_infinity)
-DO_REDUCE(sve_ah_fminv_s, float32, H1_4, helper_vfp_ah_mins, float32_infinity)
-DO_REDUCE(sve_ah_fminv_d, float64, H1_8, helper_vfp_ah_mind, float64_infinity)
+DO_REDUCE(ah_fmin,h, float16, H1_2, helper_vfp_ah_minh, float16_infinity)
+DO_REDUCE(ah_fmin,s, float32, H1_4, helper_vfp_ah_mins, float32_infinity)
+DO_REDUCE(ah_fmin,d, float64, H1_8, helper_vfp_ah_mind, float64_infinity)
-DO_REDUCE(sve_ah_fmaxv_h, float16, H1_2, helper_vfp_ah_maxh,
+DO_REDUCE(ah_fmax,h, float16, H1_2, helper_vfp_ah_maxh,
float16_chs(float16_infinity))
-DO_REDUCE(sve_ah_fmaxv_s, float32, H1_4, helper_vfp_ah_maxs,
+DO_REDUCE(ah_fmax,s, float32, H1_4, helper_vfp_ah_maxs,
float32_chs(float32_infinity))
-DO_REDUCE(sve_ah_fmaxv_d, float64, H1_8, helper_vfp_ah_maxd,
+DO_REDUCE(ah_fmax,d, float64, H1_8, helper_vfp_ah_maxd,
float64_chs(float64_infinity))
#undef DO_REDUCE
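The *_reduce functions generated above perform a pairwise tree reduction over a power-of-two element count, with inactive and tail slots padded by IDENT. A minimal sketch of the same shape, on plain floats without float_status:

    #include <stddef.h>

    static float tree_reduce_example(const float *data, size_t n)
    {
        /* n is a power of two; padded slots hold the identity. */
        if (n == 1) {
            return data[0];
        } else {
            size_t half = n / 2;
            float lo = tree_reduce_example(data, half);
            float hi = tree_reduce_example(data + half, half);
            return lo + hi;   /* FUNC(lo, hi, status) in the real helpers */
        }
    }

Keeping the pairwise order rather than accumulating linearly matters because FP addition is not associative; the identity padding keeps the recursion depth uniform regardless of the active element count.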
@@ -4550,7 +4828,7 @@ void HELPER(NAME)(void *vd, void *vn, void *vg, \
* FZ16. When converting from fp16, this affects flushing input denormals;
* when converting to fp16, this affects flushing output denormals.
*/
-static inline float32 sve_f16_to_f32(float16 f, float_status *fpst)
+float32 sve_f16_to_f32(float16 f, float_status *fpst)
{
bool save = get_flush_inputs_to_zero(fpst);
float32 ret;
@@ -4572,7 +4850,7 @@ static inline float64 sve_f16_to_f64(float16 f, float_status *fpst)
return ret;
}
-static inline float16 sve_f32_to_f16(float32 f, float_status *fpst)
+float16 sve_f32_to_f16(float32 f, float_status *fpst)
{
bool save = get_flush_to_zero(fpst);
float16 ret;
@@ -6081,6 +6359,9 @@ DO_LD1_2(ld1sds, MO_64, MO_32)
DO_LD1_2(ld1dd, MO_64, MO_64)
+DO_LD1_2(ld1squ, MO_32, MO_128)
+DO_LD1_2(ld1dqu, MO_64, MO_128)
+
#undef DO_LD1_1
#undef DO_LD1_2
@@ -6140,6 +6421,10 @@ DO_LDN_2(2, dd, MO_64)
DO_LDN_2(3, dd, MO_64)
DO_LDN_2(4, dd, MO_64)
+DO_LDN_2(2, qq, MO_128)
+DO_LDN_2(3, qq, MO_128)
+DO_LDN_2(4, qq, MO_128)
+
#undef DO_LDN_1
#undef DO_LDN_2
@@ -6703,6 +6988,13 @@ DO_STN_2(2, dd, MO_64, MO_64)
DO_STN_2(3, dd, MO_64, MO_64)
DO_STN_2(4, dd, MO_64, MO_64)
+DO_STN_2(1, sq, MO_128, MO_32)
+DO_STN_2(1, dq, MO_128, MO_64)
+
+DO_STN_2(2, qq, MO_128, MO_128)
+DO_STN_2(3, qq, MO_128, MO_128)
+DO_STN_2(4, qq, MO_128, MO_128)
+
#undef DO_STN_1
#undef DO_STN_2
@@ -6919,6 +7211,9 @@ DO_LD1_ZPZ_D(dd_be, zsu, MO_64)
DO_LD1_ZPZ_D(dd_be, zss, MO_64)
DO_LD1_ZPZ_D(dd_be, zd, MO_64)
+DO_LD1_ZPZ_D(qq_le, zd, MO_128)
+DO_LD1_ZPZ_D(qq_be, zd, MO_128)
+
#undef DO_LD1_ZPZ_S
#undef DO_LD1_ZPZ_D
@@ -7305,9 +7600,505 @@ DO_ST1_ZPZ_D(sd_be, zd, MO_32)
DO_ST1_ZPZ_D(dd_le, zd, MO_64)
DO_ST1_ZPZ_D(dd_be, zd, MO_64)
+DO_ST1_ZPZ_D(qq_le, zd, MO_128)
+DO_ST1_ZPZ_D(qq_be, zd, MO_128)
+
#undef DO_ST1_ZPZ_S
#undef DO_ST1_ZPZ_D
+/*
+ * SVE2.1 consecutive register load/store
+ */
+
+static unsigned sve2p1_cont_ldst_elements(SVEContLdSt *info, vaddr addr,
+ uint32_t png, intptr_t reg_max,
+ int N, int v_esz)
+{
+ const int esize = 1 << v_esz;
+ intptr_t reg_off_first = -1, reg_off_last = -1, reg_off_split;
+ DecodeCounter p = decode_counter(png, reg_max, v_esz);
+ unsigned b_count = p.count << v_esz;
+ unsigned b_stride = 1 << (v_esz + p.lg2_stride);
+ intptr_t page_split;
+
+ /* Set all of the element indices to -1, and the TLB data to 0. */
+ memset(info, -1, offsetof(SVEContLdSt, page));
+ memset(info->page, 0, sizeof(info->page));
+
+ if (p.invert) {
+ if (b_count >= reg_max * N) {
+ return 0;
+ }
+ reg_off_first = b_count;
+ reg_off_last = reg_max * N - b_stride;
+ } else {
+ if (b_count == 0) {
+ return 0;
+ }
+ reg_off_first = 0;
+ reg_off_last = MIN(b_count - esize, reg_max * N - b_stride);
+ }
+
+ info->reg_off_first[0] = reg_off_first;
+ info->mem_off_first[0] = reg_off_first;
+
+ page_split = -(addr | TARGET_PAGE_MASK);
+ if (reg_off_last + esize <= page_split || reg_off_first >= page_split) {
+ /* The entire operation fits within a single page. */
+ info->reg_off_last[0] = reg_off_last;
+ return b_stride;
+ }
+
+ info->page_split = page_split;
+ reg_off_split = ROUND_DOWN(page_split, esize);
+
+ /*
+ * This is the last full element on the first page, but it is not
+ * necessarily active. If there is no full element, i.e. the first
+ * active element is the one that's split, this value remains -1.
+ * It is useful as an iteration bound.
+ */
+ if (reg_off_split != 0) {
+ info->reg_off_last[0] = ROUND_DOWN(reg_off_split - esize, b_stride);
+ }
+
+ /* Determine if an unaligned element spans the pages. */
+ if (page_split & (esize - 1)) {
+ /* It is helpful to know if the split element is active. */
+ if ((reg_off_split & (b_stride - 1)) == 0) {
+ info->reg_off_split = reg_off_split;
+ info->mem_off_split = reg_off_split;
+ }
+ reg_off_split += esize;
+ }
+
+ /*
+ * We do want the first active element on the second page, because
+ * this may affect the address reported in an exception.
+ */
+ reg_off_split = ROUND_UP(reg_off_split, b_stride);
+ if (reg_off_split <= reg_off_last) {
+ info->reg_off_first[1] = reg_off_split;
+ info->mem_off_first[1] = reg_off_split;
+ info->reg_off_last[1] = reg_off_last;
+ }
+ return b_stride;
+}
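The page-split test above leans on a small address trick: -(addr | TARGET_PAGE_MASK) yields the number of bytes remaining on addr's page. A sketch assuming 4KiB pages:

    #include <assert.h>
    #include <stdint.h>

    static void page_split_example(void)
    {
        const uint64_t page_mask = ~(uint64_t)0xfff;  /* TARGET_PAGE_MASK */
        uint64_t addr = 0x1000 - 24;       /* 24 bytes before a boundary */
        intptr_t page_split = -(intptr_t)(addr | page_mask);
        assert(page_split == 24);
    }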
+
+static void sve2p1_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
+ target_ulong addr, unsigned estride,
+ int esize, int wp_access, uintptr_t ra)
+{
+#ifndef CONFIG_USER_ONLY
+ intptr_t count_off, count_last;
+ int flags0 = info->page[0].flags;
+ int flags1 = info->page[1].flags;
+
+ if (likely(!((flags0 | flags1) & TLB_WATCHPOINT))) {
+ return;
+ }
+
+ /* Indicate that watchpoints are handled. */
+ info->page[0].flags = flags0 & ~TLB_WATCHPOINT;
+ info->page[1].flags = flags1 & ~TLB_WATCHPOINT;
+
+ if (flags0 & TLB_WATCHPOINT) {
+ count_off = info->reg_off_first[0];
+ count_last = info->reg_off_split;
+ if (count_last < 0) {
+ count_last = info->reg_off_last[0];
+ }
+ do {
+ cpu_check_watchpoint(env_cpu(env), addr + count_off,
+ esize, info->page[0].attrs, wp_access, ra);
+ count_off += estride;
+ } while (count_off <= count_last);
+ }
+
+ count_off = info->reg_off_first[1];
+ if ((flags1 & TLB_WATCHPOINT) && count_off >= 0) {
+ count_last = info->reg_off_last[1];
+ do {
+ cpu_check_watchpoint(env_cpu(env), addr + count_off,
+ esize, info->page[1].attrs,
+ wp_access, ra);
+ count_off += estride;
+ } while (count_off <= count_last);
+ }
+#endif
+}
+
+static void sve2p1_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env,
+ target_ulong addr, unsigned estride,
+ int esize, uint32_t mtedesc,
+ uintptr_t ra)
+{
+ intptr_t count_off, count_last;
+
+ /*
+ * TODO: estride is always a small power of two, <= 8.
+ * Manipulate the stride within the loops such that
+ * - first iteration hits addr + off, as required,
+ * - second iteration hits ALIGN_UP(addr, 16),
+ * - other iterations advance addr by 16.
+ * This will minimize the probing to once per MTE granule.
+ */
+
+ /* Process the page only if MemAttr == Tagged. */
+ if (info->page[0].tagged) {
+ count_off = info->reg_off_first[0];
+ count_last = info->reg_off_split;
+ if (count_last < 0) {
+ count_last = info->reg_off_last[0];
+ }
+
+ do {
+ mte_check(env, mtedesc, addr + count_off, ra);
+ count_off += estride;
+ } while (count_off <= count_last);
+ }
+
+ count_off = info->reg_off_first[1];
+ if (count_off >= 0 && info->page[1].tagged) {
+ count_last = info->reg_off_last[1];
+ do {
+ mte_check(env, mtedesc, addr + count_off, ra);
+ count_off += estride;
+ } while (count_off <= count_last);
+ }
+}
+
+static inline QEMU_ALWAYS_INLINE
+void sve2p1_ld1_c(CPUARMState *env, ARMVectorReg *zd, const vaddr addr,
+ uint32_t png, uint32_t desc,
+ const uintptr_t ra, const MemOp esz,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ const unsigned N = (desc >> SIMD_DATA_SHIFT) & 1 ? 4 : 2;
+ const unsigned rstride = 1 << ((desc >> (SIMD_DATA_SHIFT + 1)) % 4);
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+ const intptr_t reg_max = simd_oprsz(desc);
+ const unsigned esize = 1 << esz;
+ intptr_t count_off, count_last;
+ intptr_t reg_off, reg_last, reg_n;
+ SVEContLdSt info;
+ unsigned estride, flags;
+ void *host;
+
+ estride = sve2p1_cont_ldst_elements(&info, addr, png, reg_max, N, esz);
+ if (estride == 0) {
+ /* The entire predicate was false; no load occurs. */
+ for (unsigned n = 0; n < N; n++) {
+ memset(zd + n * rstride, 0, reg_max);
+ }
+ return;
+ }
+
+ /* Probe the page(s). Exit with exception for any invalid page. */
+ sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_LOAD, ra);
+
+ /* Handle watchpoints for all active elements. */
+ sve2p1_cont_ldst_watchpoints(&info, env, addr, estride,
+ esize, BP_MEM_READ, ra);
+
+ /*
+ * Handle mte checks for all active elements.
+ * Since TBI must be set for MTE, !mtedesc => !mte_active.
+ */
+ if (mtedesc) {
+ sve2p1_cont_ldst_mte_check(&info, env, addr, estride,
+ esize, mtedesc, ra);
+ }
+
+ flags = info.page[0].flags | info.page[1].flags;
+ if (unlikely(flags != 0)) {
+ /*
+ * At least one page includes MMIO.
+ * Any bus operation can fail with cpu_transaction_failed,
+ * which for ARM will raise SyncExternal. Perform the load
+ * into scratch memory to preserve register state until the end.
+ */
+ ARMVectorReg scratch[4] = { };
+
+ count_off = info.reg_off_first[0];
+ count_last = info.reg_off_last[1];
+ if (count_last < 0) {
+ count_last = info.reg_off_split;
+ if (count_last < 0) {
+ count_last = info.reg_off_last[0];
+ }
+ }
+ reg_off = count_off % reg_max;
+ reg_n = count_off / reg_max;
+
+ do {
+ reg_last = MIN(count_last - count_off, reg_max - esize);
+ do {
+ tlb_fn(env, &scratch[reg_n], reg_off, addr + count_off, ra);
+ reg_off += estride;
+ count_off += estride;
+ } while (reg_off <= reg_last);
+ reg_off = 0;
+ reg_n++;
+ } while (count_off <= count_last);
+
+ for (unsigned n = 0; n < N; ++n) {
+ memcpy(&zd[n * rstride], &scratch[n], reg_max);
+ }
+ return;
+ }
+
+ /* The entire operation is in RAM, on valid pages. */
+
+ for (unsigned n = 0; n < N; ++n) {
+ memset(&zd[n * rstride], 0, reg_max);
+ }
+
+ count_off = info.reg_off_first[0];
+ count_last = info.reg_off_last[0];
+ reg_off = count_off % reg_max;
+ reg_n = count_off / reg_max;
+ host = info.page[0].host;
+
+ set_helper_retaddr(ra);
+
+ do {
+ reg_last = MIN(count_last - reg_n * reg_max, reg_max - esize);
+ do {
+ host_fn(&zd[reg_n * rstride], reg_off, host + count_off);
+ reg_off += estride;
+ count_off += estride;
+ } while (reg_off <= reg_last);
+ reg_off = 0;
+ reg_n++;
+ } while (count_off <= count_last);
+
+ clear_helper_retaddr();
+
+ /*
+ * Use the slow path to manage the cross-page misalignment.
+ * But we know this is RAM and cannot trap.
+ */
+ count_off = info.reg_off_split;
+ if (unlikely(count_off >= 0)) {
+ reg_off = count_off % reg_max;
+ reg_n = count_off / reg_max;
+ tlb_fn(env, &zd[reg_n * rstride], reg_off, addr + count_off, ra);
+ }
+
+ count_off = info.reg_off_first[1];
+ if (unlikely(count_off >= 0)) {
+ count_last = info.reg_off_last[1];
+ reg_off = count_off % reg_max;
+ reg_n = count_off / reg_max;
+ host = info.page[1].host;
+
+ set_helper_retaddr(ra);
+
+ do {
+ reg_last = MIN(count_last - reg_n * reg_max, reg_max - esize);
+ do {
+ host_fn(&zd[reg_n * rstride], reg_off, host + count_off);
+ reg_off += estride;
+ count_off += estride;
+ } while (reg_off <= reg_last);
+ reg_off = 0;
+ reg_n++;
+ } while (count_off <= count_last);
+
+ clear_helper_retaddr();
+ }
+}
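The first few lines unpack the descriptor laid out by the translator: one bit selecting two versus four registers, two bits of log2 register stride, and the MTE descriptor above SVE_MTEDESC_SHIFT. A standalone sketch of that unpacking (SIMD_DATA_SHIFT's real value lives in QEMU's simd descriptor header; 10 here is an assumption):

    #include <stdint.h>

    #define SIMD_DATA_SHIFT 10   /* assumed; see tcg/tcg-gvec-desc.h */

    static void unpack_ld1_c_desc(uint32_t desc,
                                  unsigned *N, unsigned *rstride)
    {
        *N = (desc >> SIMD_DATA_SHIFT) & 1 ? 4 : 2;
        *rstride = 1 << ((desc >> (SIMD_DATA_SHIFT + 1)) % 4);
    }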
+
+void HELPER(sve2p1_ld1bb_c)(CPUARMState *env, void *vd, target_ulong addr,
+ uint32_t png, uint32_t desc)
+{
+ sve2p1_ld1_c(env, vd, addr, png, desc, GETPC(), MO_8,
+ sve_ld1bb_host, sve_ld1bb_tlb);
+}
+
+#define DO_LD1_2(NAME, ESZ) \
+void HELPER(sve2p1_##NAME##_le_c)(CPUARMState *env, void *vd, \
+ target_ulong addr, uint32_t png, \
+ uint32_t desc) \
+{ \
+ sve2p1_ld1_c(env, vd, addr, png, desc, GETPC(), ESZ, \
+ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
+} \
+void HELPER(sve2p1_##NAME##_be_c)(CPUARMState *env, void *vd, \
+ target_ulong addr, uint32_t png, \
+ uint32_t desc) \
+{ \
+ sve2p1_ld1_c(env, vd, addr, png, desc, GETPC(), ESZ, \
+ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
+}
+
+DO_LD1_2(ld1hh, MO_16)
+DO_LD1_2(ld1ss, MO_32)
+DO_LD1_2(ld1dd, MO_64)
+
+#undef DO_LD1_2
+
+static inline QEMU_ALWAYS_INLINE
+void sve2p1_st1_c(CPUARMState *env, ARMVectorReg *zd, const vaddr addr,
+ uint32_t png, uint32_t desc,
+ const uintptr_t ra, const int esz,
+ sve_ldst1_host_fn *host_fn,
+ sve_ldst1_tlb_fn *tlb_fn)
+{
+ const unsigned N = (desc >> SIMD_DATA_SHIFT) & 1 ? 4 : 2;
+ const unsigned rstride = 1 << ((desc >> (SIMD_DATA_SHIFT + 1)) % 4);
+ uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT);
+ const intptr_t reg_max = simd_oprsz(desc);
+ const unsigned esize = 1 << esz;
+ intptr_t count_off, count_last;
+ intptr_t reg_off, reg_last, reg_n;
+ SVEContLdSt info;
+ unsigned estride, flags;
+ void *host;
+
+ estride = sve2p1_cont_ldst_elements(&info, addr, png, reg_max, N, esz);
+ if (estride == 0) {
+ /* The entire predicate was false; no store occurs. */
+ return;
+ }
+
+ /* Probe the page(s). Exit with exception for any invalid page. */
+ sve_cont_ldst_pages(&info, FAULT_ALL, env, addr, MMU_DATA_STORE, ra);
+
+ /* Handle watchpoints for all active elements. */
+ sve2p1_cont_ldst_watchpoints(&info, env, addr, estride,
+ esize, BP_MEM_WRITE, ra);
+
+ /*
+ * Handle mte checks for all active elements.
+ * Since TBI must be set for MTE, !mtedesc => !mte_active.
+ */
+ if (mtedesc) {
+ sve2p1_cont_ldst_mte_check(&info, env, addr, estride,
+ esize, mtedesc, ra);
+ }
+
+ flags = info.page[0].flags | info.page[1].flags;
+ if (unlikely(flags != 0)) {
+ /*
+ * At least one page includes MMIO.
+ * Any bus operation can fail with cpu_transaction_failed,
+ * which for ARM will raise SyncExternal. We cannot avoid this
+ * fault and may leave with the store incomplete.
+ */
+ count_off = info.reg_off_first[0];
+ count_last = info.reg_off_last[1];
+ if (count_last < 0) {
+ count_last = info.reg_off_split;
+ if (count_last < 0) {
+ count_last = info.reg_off_last[0];
+ }
+ }
+ reg_off = count_off % reg_max;
+ reg_n = count_off / reg_max;
+
+ do {
+ reg_last = MIN(count_last - count_off, reg_max - esize);
+ do {
+ tlb_fn(env, &zd[reg_n * rstride], reg_off, addr + count_off, ra);
+ reg_off += estride;
+ count_off += estride;
+ } while (reg_off <= reg_last);
+ reg_off = 0;
+ reg_n++;
+ } while (count_off <= count_last);
+ return;
+ }
+
+ /* The entire operation is in RAM, on valid pages. */
+
+ count_off = info.reg_off_first[0];
+ count_last = info.reg_off_last[0];
+ reg_off = count_off % reg_max;
+ reg_n = count_off / reg_max;
+ host = info.page[0].host;
+
+ set_helper_retaddr(ra);
+
+ do {
+ reg_last = MIN(count_last - reg_n * reg_max, reg_max - esize);
+ do {
+ host_fn(&zd[reg_n * rstride], reg_off, host + count_off);
+ reg_off += estride;
+ count_off += estride;
+ } while (reg_off <= reg_last);
+ reg_off = 0;
+ reg_n++;
+ } while (count_off <= count_last);
+
+ clear_helper_retaddr();
+
+ /*
+ * Use the slow path to manage the cross-page misalignment.
+ * But we know this is RAM and cannot trap.
+ */
+ count_off = info.reg_off_split;
+ if (unlikely(count_off >= 0)) {
+ reg_off = count_off % reg_max;
+ reg_n = count_off / reg_max;
+ tlb_fn(env, &zd[reg_n * rstride], reg_off, addr + count_off, ra);
+ }
+
+ count_off = info.reg_off_first[1];
+ if (unlikely(count_off >= 0)) {
+ count_last = info.reg_off_last[1];
+ reg_off = count_off % reg_max;
+ reg_n = count_off / reg_max;
+ host = info.page[1].host;
+
+ set_helper_retaddr(ra);
+
+ do {
+ reg_last = MIN(count_last - reg_n * reg_max, reg_max - esize);
+ do {
+ host_fn(&zd[reg_n * rstride], reg_off, host + count_off);
+ reg_off += estride;
+ count_off += estride;
+ } while (reg_off <= reg_last);
+ reg_off = 0;
+ reg_n++;
+ } while (count_off <= count_last);
+
+ clear_helper_retaddr();
+ }
+}
+
+void HELPER(sve2p1_st1bb_c)(CPUARMState *env, void *vd, target_ulong addr,
+ uint32_t png, uint32_t desc)
+{
+ sve2p1_st1_c(env, vd, addr, png, desc, GETPC(), MO_8,
+ sve_st1bb_host, sve_st1bb_tlb);
+}
+
+#define DO_ST1_2(NAME, ESZ) \
+void HELPER(sve2p1_##NAME##_le_c)(CPUARMState *env, void *vd, \
+ target_ulong addr, uint32_t png, \
+ uint32_t desc) \
+{ \
+ sve2p1_st1_c(env, vd, addr, png, desc, GETPC(), ESZ, \
+ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \
+} \
+void HELPER(sve2p1_##NAME##_be_c)(CPUARMState *env, void *vd, \
+ target_ulong addr, uint32_t png, \
+ uint32_t desc) \
+{ \
+ sve2p1_st1_c(env, vd, addr, png, desc, GETPC(), ESZ, \
+ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \
+}
+
+DO_ST1_2(st1hh, MO_16)
+DO_ST1_2(st1ss, MO_32)
+DO_ST1_2(st1dd, MO_64)
+
+#undef DO_ST1_2
+
void HELPER(sve2_eor3)(void *vd, void *vn, void *vm, void *vk, uint32_t desc)
{
intptr_t i, opr_sz = simd_oprsz(desc) / 8;
@@ -7711,3 +8502,31 @@ DO_FCVTLT(sve2_fcvtlt_sd, uint64_t, uint32_t, H1_8, H1_4, float32_to_float64)
#undef DO_FCVTLT
#undef DO_FCVTNT
+
+void HELPER(pext)(void *vd, uint32_t png, uint32_t desc)
+{
+ int pl = FIELD_EX32(desc, PREDDESC, OPRSZ);
+ int vl = pl * 8;
+ unsigned v_esz = FIELD_EX32(desc, PREDDESC, ESZ);
+ int part = FIELD_EX32(desc, PREDDESC, DATA);
+ DecodeCounter p = decode_counter(png, vl, v_esz);
+ uint64_t mask = pred_esz_masks[v_esz + p.lg2_stride];
+ ARMPredicateReg *d = vd;
+
+ /*
+ * Convert from element count to byte count and adjust
+ * for the portion of the 4*VL counter to be extracted.
+ */
+ int b_count = (p.count << v_esz) - vl * part;
+
+ memset(d, 0, sizeof(*d));
+ if (p.invert) {
+ if (b_count <= 0) {
+ do_whilel(vd, mask, vl, vl);
+ } else if (b_count < vl) {
+ do_whileg(vd, mask, vl - b_count, vl);
+ }
+ } else if (b_count > 0) {
+ do_whilel(vd, mask, MIN(b_count, vl), vl);
+ }
+}
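A worked example of the slice selection: with vl = 32 bytes, esz = MO_8, part = 1, and a counter encoding 40 active non-inverted elements, b_count = 40 - 32*1 = 8, so the helper sets the low 8 predicate bits via do_whilel(vd, mask, 8, 32). A sketch of just that arithmetic:

    /* Which bytes of the 4*VL counter land in result part 'part'. */
    static int pext_bcount(int count_bytes, int vl, int part)
    {
        return count_bytes - vl * part;   /* 40 - 32*1 == 8 above */
    }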
diff --git a/target/arm/tcg/sve_ldst_internal.h b/target/arm/tcg/sve_ldst_internal.h
index 4f159ec..c67cda9 100644
--- a/target/arm/tcg/sve_ldst_internal.h
+++ b/target/arm/tcg/sve_ldst_internal.h
@@ -20,7 +20,7 @@
#ifndef TARGET_ARM_SVE_LDST_INTERNAL_H
#define TARGET_ARM_SVE_LDST_INTERNAL_H
-#include "exec/cpu_ldst.h"
+#include "accel/tcg/cpu-ldst.h"
/*
* Load one element into @vd + @reg_off from @host.
@@ -116,6 +116,94 @@ DO_ST_PRIM_2(sd, H1_8, uint64_t, uint32_t, stl)
DO_LD_PRIM_2(dd, H1_8, uint64_t, uint64_t, ldq)
DO_ST_PRIM_2(dd, H1_8, uint64_t, uint64_t, stq)
+#define DO_LD_PRIM_3(NAME, FUNC) \
+ static inline void sve_##NAME##_host(void *vd, \
+ intptr_t reg_off, void *host) \
+ { sve_##FUNC##_host(vd, reg_off, host); \
+ *(uint64_t *)(vd + reg_off + 8) = 0; } \
+ static inline void sve_##NAME##_tlb(CPUARMState *env, void *vd, \
+ intptr_t reg_off, target_ulong addr, uintptr_t ra) \
+ { sve_##FUNC##_tlb(env, vd, reg_off, addr, ra); \
+ *(uint64_t *)(vd + reg_off + 8) = 0; }
+
+DO_LD_PRIM_3(ld1squ_be, ld1sdu_be)
+DO_LD_PRIM_3(ld1squ_le, ld1sdu_le)
+DO_LD_PRIM_3(ld1dqu_be, ld1dd_be)
+DO_LD_PRIM_3(ld1dqu_le, ld1dd_le)
+
+#define sve_st1sq_be_host sve_st1sd_be_host
+#define sve_st1sq_le_host sve_st1sd_le_host
+#define sve_st1sq_be_tlb sve_st1sd_be_tlb
+#define sve_st1sq_le_tlb sve_st1sd_le_tlb
+
+#define sve_st1dq_be_host sve_st1dd_be_host
+#define sve_st1dq_le_host sve_st1dd_le_host
+#define sve_st1dq_be_tlb sve_st1dd_be_tlb
+#define sve_st1dq_le_tlb sve_st1dd_le_tlb
+
+/*
+ * The ARMVectorReg elements are stored in host-endian 64-bit units.
+ * For 128-bit quantities, the sequence defined by the Elem[] pseudocode
+ * corresponds to storing the two 64-bit pieces in little-endian order.
+ */
+/* FIXME: Nothing in this file makes any effort at atomicity. */
+
+static inline void sve_ld1qq_be_host(void *vd, intptr_t reg_off, void *host)
+{
+ sve_ld1dd_be_host(vd, reg_off + 8, host);
+ sve_ld1dd_be_host(vd, reg_off, host + 8);
+}
+
+static inline void sve_ld1qq_le_host(void *vd, intptr_t reg_off, void *host)
+{
+ sve_ld1dd_le_host(vd, reg_off, host);
+ sve_ld1dd_le_host(vd, reg_off + 8, host + 8);
+}
+
+static inline void
+sve_ld1qq_be_tlb(CPUARMState *env, void *vd, intptr_t reg_off,
+ target_ulong addr, uintptr_t ra)
+{
+ sve_ld1dd_be_tlb(env, vd, reg_off + 8, addr, ra);
+ sve_ld1dd_be_tlb(env, vd, reg_off, addr + 8, ra);
+}
+
+static inline void
+sve_ld1qq_le_tlb(CPUARMState *env, void *vd, intptr_t reg_off,
+ target_ulong addr, uintptr_t ra)
+{
+ sve_ld1dd_le_tlb(env, vd, reg_off, addr, ra);
+ sve_ld1dd_le_tlb(env, vd, reg_off + 8, addr + 8, ra);
+}
+
+static inline void sve_st1qq_be_host(void *vd, intptr_t reg_off, void *host)
+{
+ sve_st1dd_be_host(vd, reg_off + 8, host);
+ sve_st1dd_be_host(vd, reg_off, host + 8);
+}
+
+static inline void sve_st1qq_le_host(void *vd, intptr_t reg_off, void *host)
+{
+ sve_st1dd_le_host(vd, reg_off, host);
+ sve_st1dd_le_host(vd, reg_off + 8, host + 8);
+}
+
+static inline void
+sve_st1qq_be_tlb(CPUARMState *env, void *vd, intptr_t reg_off,
+ target_ulong addr, uintptr_t ra)
+{
+ sve_st1dd_be_tlb(env, vd, reg_off + 8, addr, ra);
+ sve_st1dd_be_tlb(env, vd, reg_off, addr + 8, ra);
+}
+
+static inline void
+sve_st1qq_le_tlb(CPUARMState *env, void *vd, intptr_t reg_off,
+ target_ulong addr, uintptr_t ra)
+{
+ sve_st1dd_le_tlb(env, vd, reg_off, addr, ra);
+ sve_st1dd_le_tlb(env, vd, reg_off + 8, addr + 8, ra);
+}
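A sketch of why the _be variants cross the two halves: in a big-endian image of a 128-bit element the first eight bytes are the most significant piece, which the Elem[] convention above places second in the register:

    #include <stdint.h>

    static void ld1qq_be_sketch(uint64_t reg[2], const uint8_t mem[16])
    {
        uint64_t hi = 0, lo = 0;
        for (int i = 0; i < 8; i++) {
            hi = (hi << 8) | mem[i];      /* leading bytes: high piece */
            lo = (lo << 8) | mem[8 + i];  /* trailing bytes: low piece */
        }
        reg[0] = lo;   /* low 64 bits stored first, per the comment above */
        reg[1] = hi;
    }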
+
#undef DO_LD_TLB
#undef DO_ST_TLB
#undef DO_LD_HOST
@@ -123,6 +211,7 @@ DO_ST_PRIM_2(dd, H1_8, uint64_t, uint64_t, stq)
#undef DO_ST_PRIM_1
#undef DO_LD_PRIM_2
#undef DO_ST_PRIM_2
+#undef DO_LD_PRIM_3
/*
* Resolve the guest virtual address to info->host and info->flags.
diff --git a/target/arm/tcg/tlb-insns.c b/target/arm/tcg/tlb-insns.c
index 630a481..95c26c6 100644
--- a/target/arm/tcg/tlb-insns.c
+++ b/target/arm/tcg/tlb-insns.c
@@ -8,6 +8,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "exec/cputlb.h"
+#include "exec/target_page.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
@@ -34,7 +35,6 @@ static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri,
return CP_ACCESS_OK;
}
-#ifdef TARGET_AARCH64
/* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. */
static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
@@ -45,7 +45,6 @@ static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri,
}
return CP_ACCESS_OK;
}
-#endif
/* IS variants of TLB operations must affect all cores */
static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -801,7 +800,6 @@ static const ARMCPRegInfo tlbi_el3_cp_reginfo[] = {
.writefn = tlbi_aa64_vae3_write },
};
-#ifdef TARGET_AARCH64
typedef struct {
uint64_t base;
uint64_t length;
@@ -1269,8 +1267,6 @@ static const ARMCPRegInfo tlbi_rme_reginfo[] = {
.writefn = tlbi_aa64_paallos_write },
};
-#endif
-
void define_tlb_insn_regs(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
@@ -1298,7 +1294,6 @@ void define_tlb_insn_regs(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_EL3)) {
define_arm_cp_regs(cpu, tlbi_el3_cp_reginfo);
}
-#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_tlbirange, cpu)) {
define_arm_cp_regs(cpu, tlbirange_reginfo);
}
@@ -1308,5 +1303,4 @@ void define_tlb_insn_regs(ARMCPU *cpu)
if (cpu_isar_feature(aa64_rme, cpu)) {
define_arm_cp_regs(cpu, tlbi_rme_reginfo);
}
-#endif
}
diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c
index 8841f03..23c72a9 100644
--- a/target/arm/tcg/tlb_helper.c
+++ b/target/arm/tcg/tlb_helper.c
@@ -9,9 +9,9 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/exec-all.h"
-#include "exec/helper-proto.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
/*
* Returns true if the stage 1 translation regime is using LPAE format page
@@ -277,7 +277,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
-void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
+void helper_exception_pc_alignment(CPUARMState *env, vaddr pc)
{
ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
int target_el = exception_target_el(env);
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index 3901432..dbf4759 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -17,8 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
-
-#include "exec/exec-all.h"
+#include "exec/target_page.h"
#include "translate.h"
#include "translate-a64.h"
#include "qemu/log.h"
@@ -434,12 +433,6 @@ static void gen_rebuild_hflags(DisasContext *s)
gen_helper_rebuild_hflags_a64(tcg_env, tcg_constant_i32(s->current_el));
}
-static void gen_exception_internal(int excp)
-{
- assert(excp_is_internal(excp));
- gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
-}
-
static void gen_exception_internal_insn(DisasContext *s, int excp)
{
gen_a64_update_pc(s, 0);
@@ -1076,11 +1069,9 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
TCGv_i64 cf_64 = tcg_temp_new_i64();
TCGv_i64 vf_64 = tcg_temp_new_i64();
TCGv_i64 tmp = tcg_temp_new_i64();
- TCGv_i64 zero = tcg_constant_i64(0);
tcg_gen_extu_i32_i64(cf_64, cpu_CF);
- tcg_gen_add2_i64(result, cf_64, t0, zero, cf_64, zero);
- tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, zero);
+ tcg_gen_addcio_i64(result, cf_64, t0, t1, cf_64);
tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
gen_set_NZ64(result);
@@ -1094,12 +1085,10 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
TCGv_i32 t0_32 = tcg_temp_new_i32();
TCGv_i32 t1_32 = tcg_temp_new_i32();
TCGv_i32 tmp = tcg_temp_new_i32();
- TCGv_i32 zero = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(t0_32, t0);
tcg_gen_extrl_i64_i32(t1_32, t1);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, zero, cpu_CF, zero);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, zero);
+ tcg_gen_addcio_i32(cpu_NF, cpu_CF, t0_32, t1_32, cpu_CF);
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
@@ -1392,11 +1381,8 @@ static bool fp_access_check_only(DisasContext *s)
return true;
}
-static bool fp_access_check(DisasContext *s)
+static bool nonstreaming_check(DisasContext *s)
{
- if (!fp_access_check_only(s)) {
- return false;
- }
if (s->sme_trap_nonstreaming && s->is_nonstreaming) {
gen_exception_insn(s, 0, EXCP_UDEF,
syn_smetrap(SME_ET_Streaming, false));
@@ -1405,6 +1391,11 @@ static bool fp_access_check(DisasContext *s)
return true;
}
+static bool fp_access_check(DisasContext *s)
+{
+ return fp_access_check_only(s) && nonstreaming_check(s);
+}
+
/*
* Return <0 for non-supported element sizes, with MO_16 controlled by
* FEAT_FP16; return 0 for fp disabled; otherwise return >0 for success.
@@ -1455,14 +1446,24 @@ static int fp_access_check_vector_hsd(DisasContext *s, bool is_q, MemOp esz)
*/
bool sve_access_check(DisasContext *s)
{
- if (s->pstate_sm || !dc_isar_feature(aa64_sve, s)) {
+ if (dc_isar_feature(aa64_sme, s)) {
bool ret;
- assert(dc_isar_feature(aa64_sme, s));
- ret = sme_sm_enabled_check(s);
+ if (s->pstate_sm) {
+ ret = sme_enabled_check(s);
+ } else if (dc_isar_feature(aa64_sve, s)) {
+ goto continue_sve;
+ } else {
+ ret = sme_sm_enabled_check(s);
+ }
+ if (ret) {
+ ret = nonstreaming_check(s);
+ }
s->sve_access_checked = (ret ? 1 : -1);
return ret;
}
+
+ continue_sve:
if (s->sve_excp_el) {
/* Assert that we only raise one exception per instruction. */
assert(!s->sve_access_checked);
@@ -1499,7 +1500,8 @@ bool sme_enabled_check(DisasContext *s)
* to be zero when fp_excp_el has priority. This is because we need
* sme_excp_el by itself for cpregs access checks.
*/
- if (!s->fp_excp_el || s->sme_excp_el < s->fp_excp_el) {
+ if (s->sme_excp_el
+ && (!s->fp_excp_el || s->sme_excp_el <= s->fp_excp_el)) {
bool ret = sme_access_check(s);
s->fp_access_checked = (ret ? 1 : -1);
return ret;
@@ -1821,6 +1823,10 @@ static bool trans_RETA(DisasContext *s, arg_reta *a)
{
TCGv_i64 dst;
+ if (!dc_isar_feature(aa64_pauth, s)) {
+ return false;
+ }
+
dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m);
gen_a64_set_pc(s, dst);
s->base.is_jmp = DISAS_JUMP;
@@ -6108,9 +6114,9 @@ static bool do_dot_vector_env(DisasContext *s, arg_qrrr_e *a,
return true;
}
-TRANS_FEAT(SDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_sdot_b)
-TRANS_FEAT(UDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_udot_b)
-TRANS_FEAT(USDOT_v, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_usdot_b)
+TRANS_FEAT(SDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_sdot_4b)
+TRANS_FEAT(UDOT_v, aa64_dp, do_dot_vector, a, gen_helper_gvec_udot_4b)
+TRANS_FEAT(USDOT_v, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_usdot_4b)
TRANS_FEAT(BFDOT_v, aa64_bf16, do_dot_vector_env, a, gen_helper_gvec_bfdot)
TRANS_FEAT(BFMMLA, aa64_bf16, do_dot_vector_env, a, gen_helper_gvec_bfmmla)
TRANS_FEAT(SMMLA, aa64_i8mm, do_dot_vector, a, gen_helper_gvec_smmla_b)
@@ -6870,12 +6876,12 @@ static bool do_dot_vector_idx_env(DisasContext *s, arg_qrrx_e *a,
return true;
}
-TRANS_FEAT(SDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_sdot_idx_b)
-TRANS_FEAT(UDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_udot_idx_b)
+TRANS_FEAT(SDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_sdot_idx_4b)
+TRANS_FEAT(UDOT_vi, aa64_dp, do_dot_vector_idx, a, gen_helper_gvec_udot_idx_4b)
TRANS_FEAT(SUDOT_vi, aa64_i8mm, do_dot_vector_idx, a,
- gen_helper_gvec_sudot_idx_b)
+ gen_helper_gvec_sudot_idx_4b)
TRANS_FEAT(USDOT_vi, aa64_i8mm, do_dot_vector_idx, a,
- gen_helper_gvec_usdot_idx_b)
+ gen_helper_gvec_usdot_idx_4b)
TRANS_FEAT(BFDOT_vi, aa64_bf16, do_dot_vector_idx_env, a,
gen_helper_gvec_bfdot_idx)
@@ -8600,7 +8606,7 @@ static bool trans_CCMP(DisasContext *s, arg_CCMP *a)
tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
nzcv = a->nzcv;
- has_andc = tcg_op_supported(INDEX_op_andc_i32, TCG_TYPE_I32, 0);
+ has_andc = tcg_op_supported(INDEX_op_andc, TCG_TYPE_I32, 0);
if (nzcv & 8) { /* N */
tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
} else {
@@ -10133,8 +10139,10 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
dc->trap_eret = EX_TBFLAG_A64(tb_flags, TRAP_ERET);
dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL);
dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL);
+ dc->zt0_excp_el = EX_TBFLAG_A64(tb_flags, ZT0EXC_EL);
dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16;
dc->svl = (EX_TBFLAG_A64(tb_flags, SVL) + 1) * 16;
+ dc->max_svl = arm_cpu->sme_max_vq * 16;
dc->pauth_active = EX_TBFLAG_A64(tb_flags, PAUTH_ACTIVE);
dc->bt = EX_TBFLAG_A64(tb_flags, BT);
dc->btype = EX_TBFLAG_A64(tb_flags, BTYPE);
@@ -10247,7 +10255,7 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
* start of the TB.
*/
assert(s->base.num_insns == 1);
- gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
+ gen_helper_exception_pc_alignment(tcg_env, tcg_constant_vaddr(pc));
s->base.is_jmp = DISAS_NORETURN;
s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
return;
diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h
index b2420f5..993dde6 100644
--- a/target/arm/tcg/translate-a64.h
+++ b/target/arm/tcg/translate-a64.h
@@ -225,7 +225,13 @@ void gen_gvec_usqadd_qc(unsigned vece, uint32_t rd_ofs,
uint32_t rn_ofs, uint32_t rm_ofs,
uint32_t opr_sz, uint32_t max_sz);
-void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
-void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs, int len, int rn, int imm);
+void gen_gvec_sve2_sqdmulh(unsigned vece, uint32_t rd_ofs,
+ uint32_t rn_ofs, uint32_t rm_ofs,
+ uint32_t opr_sz, uint32_t max_sz);
+
+void gen_sve_ldr(DisasContext *s, TCGv_ptr, int vofs,
+ int len, int rn, int imm, MemOp align);
+void gen_sve_str(DisasContext *s, TCGv_ptr, int vofs,
+ int len, int rn, int imm, MemOp align);
#endif /* TARGET_ARM_TRANSLATE_A64_H */
diff --git a/target/arm/tcg/translate-neon.c b/target/arm/tcg/translate-neon.c
index c4fecb8..844d2e2 100644
--- a/target/arm/tcg/translate-neon.c
+++ b/target/arm/tcg/translate-neon.c
@@ -271,7 +271,7 @@ static bool trans_VSDOT(DisasContext *s, arg_VSDOT *a)
return false;
}
return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
- gen_helper_gvec_sdot_b);
+ gen_helper_gvec_sdot_4b);
}
static bool trans_VUDOT(DisasContext *s, arg_VUDOT *a)
@@ -280,7 +280,7 @@ static bool trans_VUDOT(DisasContext *s, arg_VUDOT *a)
return false;
}
return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
- gen_helper_gvec_udot_b);
+ gen_helper_gvec_udot_4b);
}
static bool trans_VUSDOT(DisasContext *s, arg_VUSDOT *a)
@@ -289,7 +289,7 @@ static bool trans_VUSDOT(DisasContext *s, arg_VUSDOT *a)
return false;
}
return do_neon_ddda(s, a->q * 7, a->vd, a->vn, a->vm, 0,
- gen_helper_gvec_usdot_b);
+ gen_helper_gvec_usdot_4b);
}
static bool trans_VDOT_b16(DisasContext *s, arg_VDOT_b16 *a)
@@ -356,7 +356,7 @@ static bool trans_VSDOT_scalar(DisasContext *s, arg_VSDOT_scalar *a)
return false;
}
return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
- gen_helper_gvec_sdot_idx_b);
+ gen_helper_gvec_sdot_idx_4b);
}
static bool trans_VUDOT_scalar(DisasContext *s, arg_VUDOT_scalar *a)
@@ -365,7 +365,7 @@ static bool trans_VUDOT_scalar(DisasContext *s, arg_VUDOT_scalar *a)
return false;
}
return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
- gen_helper_gvec_udot_idx_b);
+ gen_helper_gvec_udot_idx_4b);
}
static bool trans_VUSDOT_scalar(DisasContext *s, arg_VUSDOT_scalar *a)
@@ -374,7 +374,7 @@ static bool trans_VUSDOT_scalar(DisasContext *s, arg_VUSDOT_scalar *a)
return false;
}
return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
- gen_helper_gvec_usdot_idx_b);
+ gen_helper_gvec_usdot_idx_4b);
}
static bool trans_VSUDOT_scalar(DisasContext *s, arg_VSUDOT_scalar *a)
@@ -383,7 +383,7 @@ static bool trans_VSUDOT_scalar(DisasContext *s, arg_VSUDOT_scalar *a)
return false;
}
return do_neon_ddda(s, a->q * 6, a->vd, a->vn, a->vm, a->index,
- gen_helper_gvec_sudot_idx_b);
+ gen_helper_gvec_sudot_idx_4b);
}
static bool trans_VDOT_b16_scal(DisasContext *s, arg_VDOT_b16_scal *a)
@@ -1010,8 +1010,8 @@ DO_3S_FP_GVEC(VACGE, gen_helper_gvec_facge_s, gen_helper_gvec_facge_h)
DO_3S_FP_GVEC(VACGT, gen_helper_gvec_facgt_s, gen_helper_gvec_facgt_h)
DO_3S_FP_GVEC(VMAX, gen_helper_gvec_fmax_s, gen_helper_gvec_fmax_h)
DO_3S_FP_GVEC(VMIN, gen_helper_gvec_fmin_s, gen_helper_gvec_fmin_h)
-DO_3S_FP_GVEC(VMLA, gen_helper_gvec_fmla_s, gen_helper_gvec_fmla_h)
-DO_3S_FP_GVEC(VMLS, gen_helper_gvec_fmls_s, gen_helper_gvec_fmls_h)
+DO_3S_FP_GVEC(VMLA, gen_helper_gvec_fmla_nf_s, gen_helper_gvec_fmla_nf_h)
+DO_3S_FP_GVEC(VMLS, gen_helper_gvec_fmls_nf_s, gen_helper_gvec_fmls_nf_h)
DO_3S_FP_GVEC(VFMA, gen_helper_gvec_vfma_s, gen_helper_gvec_vfma_h)
DO_3S_FP_GVEC(VFMS, gen_helper_gvec_vfms_s, gen_helper_gvec_vfms_h)
DO_3S_FP_GVEC(VRECPS, gen_helper_gvec_recps_nf_s, gen_helper_gvec_recps_nf_h)
diff --git a/target/arm/tcg/translate-sme.c b/target/arm/tcg/translate-sme.c
index fcbb350..65fc8bc 100644
--- a/target/arm/tcg/translate-sme.c
+++ b/target/arm/tcg/translate-sme.c
@@ -27,16 +27,25 @@
#include "decode-sme.c.inc"
+static bool sme2_zt0_enabled_check(DisasContext *s)
+{
+ if (!sme_za_enabled_check(s)) {
+ return false;
+ }
+ if (s->zt0_excp_el) {
+ gen_exception_insn_el(s, 0, EXCP_UDEF,
+ syn_smetrap(SME_ET_InaccessibleZT0, false),
+ s->zt0_excp_el);
+ return false;
+ }
+ return true;
+}
-/*
- * Resolve tile.size[index] to a host pointer, where tile and index
- * are always decoded together, dependent on the element size.
- */
+/* Resolve tile.size[rs+imm] to a host pointer. */
static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs,
- int tile_index, bool vertical)
+ int tile, int imm, int div_len,
+ int vec_mod, bool vertical)
{
- int tile = tile_index >> (4 - esz);
- int index = esz == MO_128 ? 0 : extract32(tile_index, 0, 4 - esz);
int pos, len, offset;
TCGv_i32 tmp;
TCGv_ptr addr;
@@ -44,10 +53,23 @@ static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs,
/* Compute the final index, which is Rs+imm. */
tmp = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(tmp, cpu_reg(s, rs));
- tcg_gen_addi_i32(tmp, tmp, index);
+ /*
+ * Round the vector index down to a multiple of vec_mod if necessary.
+ * We do this before adding the offset, to handle cases like
+ * MOVA (tile to vector, 2 registers) where we want to call this
+ * several times in a loop with an increasing offset. We rely on
+ * the instruction encodings always forcing the initial offset in
+ * [rs + offset] to be a multiple of vec_mod. The pseudocode usually
+ * does the round-down after adding the offset rather than before,
+ * but MOVA is an exception.
+ */
+ if (vec_mod > 1) {
+ tcg_gen_andc_i32(tmp, tmp, tcg_constant_i32(vec_mod - 1));
+ }
+ tcg_gen_addi_i32(tmp, tmp, imm);
/* Prepare a power-of-two modulo via extraction of @len bits. */
- len = ctz32(streaming_vec_reg_size(s)) - esz;
+ len = ctz32(streaming_vec_reg_size(s) / div_len) - esz;
if (!len) {
/*
@@ -92,7 +114,7 @@ static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs,
offset = tile * sizeof(ARMVectorReg);
/* Include the byte offset of zarray to make this relative to env. */
- offset += offsetof(CPUARMState, zarray);
+ offset += offsetof(CPUARMState, za_state.za);
tcg_gen_addi_i32(tmp, tmp, offset);
/* Add the byte offset to env to produce the final pointer. */
@@ -103,6 +125,14 @@ static TCGv_ptr get_tile_rowcol(DisasContext *s, int esz, int rs,
return addr;
}
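The vec_mod rounding above is a power-of-two round-down expressed with andc; the scalar equivalent:

    #include <stdint.h>

    static uint32_t round_down_pow2(uint32_t index, uint32_t vec_mod)
    {
        return index & ~(vec_mod - 1);   /* e.g. 5 with vec_mod = 4 -> 4 */
    }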
+/* Resolve ZArray[rs+imm] to a host pointer. */
+static TCGv_ptr get_zarray(DisasContext *s, int rs, int imm,
+ int div_len, int vec_mod)
+{
+ /* ZA[n] equates to ZA0H.B[n]. */
+ return get_tile_rowcol(s, MO_8, rs, 0, imm, div_len, vec_mod, false);
+}
+
/*
* Resolve tile.size[0] to a host pointer.
* Used by e.g. outer product insns where we require the entire tile.
@@ -112,7 +142,7 @@ static TCGv_ptr get_tile(DisasContext *s, int esz, int tile)
TCGv_ptr addr = tcg_temp_new_ptr();
int offset;
- offset = tile * sizeof(ARMVectorReg) + offsetof(CPUARMState, zarray);
+ offset = tile * sizeof(ARMVectorReg) + offsetof(CPUARMState, za_state.za);
tcg_gen_addi_ptr(addr, tcg_env, offset);
return addr;
@@ -130,7 +160,40 @@ static bool trans_ZERO(DisasContext *s, arg_ZERO *a)
return true;
}
-static bool trans_MOVA(DisasContext *s, arg_MOVA *a)
+static bool trans_ZERO_zt0(DisasContext *s, arg_ZERO_zt0 *a)
+{
+ if (!dc_isar_feature(aa64_sme2, s)) {
+ return false;
+ }
+ if (sme_enabled_check(s) && sme2_zt0_enabled_check(s)) {
+ tcg_gen_gvec_dup_imm(MO_64, offsetof(CPUARMState, za_state.zt0),
+ sizeof_field(CPUARMState, za_state.zt0),
+ sizeof_field(CPUARMState, za_state.zt0), 0);
+ }
+ return true;
+}
+
+static bool trans_ZERO_za(DisasContext *s, arg_ZERO_za *a)
+{
+ if (!dc_isar_feature(aa64_sme2p1, s)) {
+ return false;
+ }
+ if (sme_smza_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ int vstride = svl / a->ngrp;
+ TCGv_ptr t_za = get_zarray(s, a->rv, a->off, a->ngrp, a->nvec);
+
+ for (int r = 0; r < a->ngrp; ++r) {
+ for (int i = 0; i < a->nvec; ++i) {
+ int o_za = (r * vstride + i) * sizeof(ARMVectorReg);
+ tcg_gen_gvec_dup_imm_var(MO_64, t_za, o_za, svl, svl, 0);
+ }
+ }
+ }
+ return true;
+}
+
+static bool do_mova_tile(DisasContext *s, arg_mova_p *a, bool to_vec)
{
static gen_helper_gvec_4 * const h_fns[5] = {
gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
@@ -152,14 +215,11 @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a)
TCGv_i32 t_desc;
int svl;
- if (!dc_isar_feature(aa64_sme, s)) {
- return false;
- }
if (!sme_smza_enabled_check(s)) {
return true;
}
- t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v);
+ t_za = get_tile_rowcol(s, a->esz, a->rs, a->za, a->off, 1, 0, a->v);
t_zr = vec_full_reg_ptr(s, a->zr);
t_pg = pred_full_reg_ptr(s, a->pg);
@@ -168,14 +228,14 @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a)
if (a->v) {
/* Vertical slice -- use sme mova helpers. */
- if (a->to_vec) {
+ if (to_vec) {
zc_fns[a->esz](t_zr, t_za, t_pg, t_desc);
} else {
cz_fns[a->esz](t_za, t_zr, t_pg, t_desc);
}
} else {
/* Horizontal slice -- reuse sve sel helpers. */
- if (a->to_vec) {
+ if (to_vec) {
h_fns[a->esz](t_zr, t_za, t_zr, t_pg, t_desc);
} else {
h_fns[a->esz](t_za, t_zr, t_za, t_pg, t_desc);
@@ -184,6 +244,147 @@ static bool trans_MOVA(DisasContext *s, arg_MOVA *a)
return true;
}
+TRANS_FEAT(MOVA_tz, aa64_sme, do_mova_tile, a, false)
+TRANS_FEAT(MOVA_zt, aa64_sme, do_mova_tile, a, true)
+
+static bool do_mova_tile_n(DisasContext *s, arg_mova_t *a, int n,
+ bool to_vec, bool zero)
+{
+ static gen_helper_gvec_2 * const cz_fns[] = {
+ gen_helper_sme2_mova_cz_b, gen_helper_sme2_mova_cz_h,
+ gen_helper_sme2_mova_cz_s, gen_helper_sme2_mova_cz_d,
+ };
+ static gen_helper_gvec_2 * const zc_fns[] = {
+ gen_helper_sme2_mova_zc_b, gen_helper_sme2_mova_zc_h,
+ gen_helper_sme2_mova_zc_s, gen_helper_sme2_mova_zc_d,
+ };
+ static gen_helper_gvec_2 * const zc_z_fns[] = {
+ gen_helper_sme2p1_movaz_zc_b, gen_helper_sme2p1_movaz_zc_h,
+ gen_helper_sme2p1_movaz_zc_s, gen_helper_sme2p1_movaz_zc_d,
+ gen_helper_sme2p1_movaz_zc_q,
+ };
+ TCGv_ptr t_za;
+ int svl, bytes_per_op = n << a->esz;
+
+ /*
+ * The MaxImplementedSVL check happens in the decode pseudocode,
+ * before the SM+ZA enabled check in the operation pseudocode.
+ * This will (currently) only fail for NREG=4, ESZ=MO_64.
+ */
+ if (s->max_svl < bytes_per_op) {
+ unallocated_encoding(s);
+ return true;
+ }
+
+ assert(a->esz <= MO_64 + zero);
+
+ if (!sme_smza_enabled_check(s)) {
+ return true;
+ }
+
+ svl = streaming_vec_reg_size(s);
+
+ /*
+ * The CurrentVL check happens in the operation pseudocode,
+ * after the SM+ZA enabled check.
+ */
+ if (svl < bytes_per_op) {
+ unallocated_encoding(s);
+ return true;
+ }
+
+ if (a->v) {
+ TCGv_i32 t_desc = tcg_constant_i32(simd_desc(svl, svl, 0));
+
+ for (int i = 0; i < n; ++i) {
+ TCGv_ptr t_zr = vec_full_reg_ptr(s, a->zr * n + i);
+ t_za = get_tile_rowcol(s, a->esz, a->rs, a->za,
+ a->off * n + i, 1, n, a->v);
+ if (zero) {
+ zc_z_fns[a->esz](t_zr, t_za, t_desc);
+ } else if (to_vec) {
+ zc_fns[a->esz](t_zr, t_za, t_desc);
+ } else {
+ cz_fns[a->esz](t_za, t_zr, t_desc);
+ }
+ }
+ } else {
+ for (int i = 0; i < n; ++i) {
+ int o_zr = vec_full_reg_offset(s, a->zr * n + i);
+ t_za = get_tile_rowcol(s, a->esz, a->rs, a->za,
+ a->off * n + i, 1, n, a->v);
+ if (to_vec) {
+ tcg_gen_gvec_mov_var(MO_8, tcg_env, o_zr, t_za, 0, svl, svl);
+ if (zero) {
+ tcg_gen_gvec_dup_imm_var(MO_8, t_za, 0, svl, svl, 0);
+ }
+ } else {
+ tcg_gen_gvec_mov_var(MO_8, t_za, 0, tcg_env, o_zr, svl, svl);
+ }
+ }
+ }
+ return true;
+}
+
+TRANS_FEAT(MOVA_tz2, aa64_sme2, do_mova_tile_n, a, 2, false, false)
+TRANS_FEAT(MOVA_tz4, aa64_sme2, do_mova_tile_n, a, 4, false, false)
+TRANS_FEAT(MOVA_zt2, aa64_sme2, do_mova_tile_n, a, 2, true, false)
+TRANS_FEAT(MOVA_zt4, aa64_sme2, do_mova_tile_n, a, 4, true, false)
+
+TRANS_FEAT(MOVAZ_zt, aa64_sme2p1, do_mova_tile_n, a, 1, true, true)
+TRANS_FEAT(MOVAZ_zt2, aa64_sme2p1, do_mova_tile_n, a, 2, true, true)
+TRANS_FEAT(MOVAZ_zt4, aa64_sme2p1, do_mova_tile_n, a, 4, true, true)
+
+static bool do_mova_array_n(DisasContext *s, arg_mova_a *a, int n,
+ bool to_vec, bool zero)
+{
+ TCGv_ptr t_za;
+ int svl;
+
+ if (!sme_smza_enabled_check(s)) {
+ return true;
+ }
+
+ svl = streaming_vec_reg_size(s);
+ t_za = get_zarray(s, a->rv, a->off, n, 0);
+
+ for (int i = 0; i < n; ++i) {
+ int o_za = (svl / n * sizeof(ARMVectorReg)) * i;
+ int o_zr = vec_full_reg_offset(s, a->zr * n + i);
+
+ if (to_vec) {
+ tcg_gen_gvec_mov_var(MO_8, tcg_env, o_zr, t_za, o_za, svl, svl);
+ if (zero) {
+ tcg_gen_gvec_dup_imm_var(MO_8, t_za, o_za, svl, svl, 0);
+ }
+ } else {
+ tcg_gen_gvec_mov_var(MO_8, t_za, o_za, tcg_env, o_zr, svl, svl);
+ }
+ }
+ return true;
+}
+
+TRANS_FEAT(MOVA_az2, aa64_sme2, do_mova_array_n, a, 2, false, false)
+TRANS_FEAT(MOVA_az4, aa64_sme2, do_mova_array_n, a, 4, false, false)
+TRANS_FEAT(MOVA_za2, aa64_sme2, do_mova_array_n, a, 2, true, false)
+TRANS_FEAT(MOVA_za4, aa64_sme2, do_mova_array_n, a, 4, true, false)
+
+TRANS_FEAT(MOVAZ_za2, aa64_sme2p1, do_mova_array_n, a, 2, true, true)
+TRANS_FEAT(MOVAZ_za4, aa64_sme2p1, do_mova_array_n, a, 4, true, true)
+
+static bool do_movt(DisasContext *s, arg_MOVT_rzt *a,
+ void (*func)(TCGv_i64, TCGv_ptr, tcg_target_long))
+{
+ if (sme2_zt0_enabled_check(s)) {
+ func(cpu_reg(s, a->rt), tcg_env,
+ offsetof(CPUARMState, za_state.zt0) + a->off * 8);
+ }
+ return true;
+}
+
+TRANS_FEAT(MOVT_rzt, aa64_sme2, do_movt, a, tcg_gen_ld_i64)
+TRANS_FEAT(MOVT_ztr, aa64_sme2, do_movt, a, tcg_gen_st_i64)
+
static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
{
typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i32);
@@ -225,7 +426,7 @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
return true;
}
- t_za = get_tile_rowcol(s, a->esz, a->rs, a->za_imm, a->v);
+ t_za = get_tile_rowcol(s, a->esz, a->rs, a->za, a->off, 1, 0, a->v);
t_pg = pred_full_reg_ptr(s, a->pg);
addr = tcg_temp_new_i64();
@@ -243,28 +444,37 @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a)
return true;
}
-typedef void GenLdStR(DisasContext *, TCGv_ptr, int, int, int, int);
+typedef void GenLdStR(DisasContext *, TCGv_ptr, int, int, int, int, MemOp);
static bool do_ldst_r(DisasContext *s, arg_ldstr *a, GenLdStR *fn)
{
- int svl = streaming_vec_reg_size(s);
- int imm = a->imm;
- TCGv_ptr base;
+ if (sme_za_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ int imm = a->imm;
+ TCGv_ptr base = get_zarray(s, a->rv, imm, 1, 0);
- if (!sme_za_enabled_check(s)) {
- return true;
+ fn(s, base, 0, svl, a->rn, imm * svl,
+ s->align_mem ? MO_ALIGN_16 : MO_UNALN);
}
-
- /* ZA[n] equates to ZA0H.B[n]. */
- base = get_tile_rowcol(s, MO_8, a->rv, imm, false);
-
- fn(s, base, 0, svl, a->rn, imm * svl);
return true;
}
TRANS_FEAT(LDR, aa64_sme, do_ldst_r, a, gen_sve_ldr)
TRANS_FEAT(STR, aa64_sme, do_ldst_r, a, gen_sve_str)
+static bool do_ldst_zt0(DisasContext *s, arg_ldstzt0 *a, GenLdStR *fn)
+{
+ if (sme2_zt0_enabled_check(s)) {
+ fn(s, tcg_env, offsetof(CPUARMState, za_state.zt0),
+ sizeof_field(CPUARMState, za_state.zt0), a->rn, 0,
+ s->align_mem ? MO_ALIGN_16 : MO_UNALN);
+ }
+ return true;
+}
+
+TRANS_FEAT(LDR_zt0, aa64_sme2, do_ldst_zt0, a, gen_sve_ldr)
+TRANS_FEAT(STR_zt0, aa64_sme2, do_ldst_zt0, a, gen_sve_str)
+
static bool do_adda(DisasContext *s, arg_adda *a, MemOp esz,
gen_helper_gvec_4 *fn)
{
@@ -316,7 +526,7 @@ static bool do_outprod_fpst(DisasContext *s, arg_op *a, MemOp esz,
gen_helper_gvec_5_ptr *fn)
{
int svl = streaming_vec_reg_size(s);
- uint32_t desc = simd_desc(svl, svl, a->sub);
+ uint32_t desc = simd_desc(svl, svl, 0);
TCGv_ptr za, zn, zm, pn, pm, fpst;
if (!sme_smza_enabled_check(s)) {
@@ -338,7 +548,7 @@ static bool do_outprod_env(DisasContext *s, arg_op *a, MemOp esz,
gen_helper_gvec_5_ptr *fn)
{
int svl = streaming_vec_reg_size(s);
- uint32_t desc = simd_desc(svl, svl, a->sub);
+ uint32_t desc = simd_desc(svl, svl, 0);
TCGv_ptr za, zn, zm, pn, pm;
if (!sme_smza_enabled_check(s)) {
@@ -355,14 +565,32 @@ static bool do_outprod_env(DisasContext *s, arg_op *a, MemOp esz,
return true;
}
-TRANS_FEAT(FMOPA_h, aa64_sme, do_outprod_env, a,
- MO_32, gen_helper_sme_fmopa_h)
-TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a,
- MO_32, FPST_A64, gen_helper_sme_fmopa_s)
-TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a,
- MO_64, FPST_A64, gen_helper_sme_fmopa_d)
+TRANS_FEAT(FMOPA_w_h, aa64_sme, do_outprod_env, a, MO_32,
+ !a->sub ? gen_helper_sme_fmopa_w_h
+ : !s->fpcr_ah ? gen_helper_sme_fmops_w_h
+ : gen_helper_sme_ah_fmops_w_h)
+TRANS_FEAT(FMOPA_h, aa64_sme_f16f16, do_outprod_fpst, a, MO_16, FPST_ZA_F16,
+ !a->sub ? gen_helper_sme_fmopa_h
+ : !s->fpcr_ah ? gen_helper_sme_fmops_h
+ : gen_helper_sme_ah_fmops_h)
+TRANS_FEAT(FMOPA_s, aa64_sme, do_outprod_fpst, a, MO_32, FPST_ZA,
+ !a->sub ? gen_helper_sme_fmopa_s
+ : !s->fpcr_ah ? gen_helper_sme_fmops_s
+ : gen_helper_sme_ah_fmops_s)
+TRANS_FEAT(FMOPA_d, aa64_sme_f64f64, do_outprod_fpst, a, MO_64, FPST_ZA,
+ !a->sub ? gen_helper_sme_fmopa_d
+ : !s->fpcr_ah ? gen_helper_sme_fmops_d
+ : gen_helper_sme_ah_fmops_d)
+
+TRANS_FEAT(BFMOPA, aa64_sme_b16b16, do_outprod_fpst, a, MO_16, FPST_ZA,
+ !a->sub ? gen_helper_sme_bfmopa
+ : !s->fpcr_ah ? gen_helper_sme_bfmops
+ : gen_helper_sme_ah_bfmops)
-TRANS_FEAT(BFMOPA, aa64_sme, do_outprod_env, a, MO_32, gen_helper_sme_bfmopa)
+TRANS_FEAT(BFMOPA_w, aa64_sme, do_outprod_env, a, MO_32,
+ !a->sub ? gen_helper_sme_bfmopa_w
+ : !s->fpcr_ah ? gen_helper_sme_bfmops_w
+ : gen_helper_sme_ah_bfmops_w)
TRANS_FEAT(SMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_smopa_s)
TRANS_FEAT(UMOPA_s, aa64_sme, do_outprod, a, MO_32, gen_helper_sme_umopa_s)
@@ -373,3 +601,1173 @@ TRANS_FEAT(SMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_smopa_
TRANS_FEAT(UMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_umopa_d)
TRANS_FEAT(SUMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_sumopa_d)
TRANS_FEAT(USMOPA_d, aa64_sme_i16i64, do_outprod, a, MO_64, gen_helper_sme_usmopa_d)
+
+TRANS_FEAT(BMOPA, aa64_sme2, do_outprod, a, MO_32, gen_helper_sme2_bmopa_s)
+TRANS_FEAT(SMOPA2_s, aa64_sme2, do_outprod, a, MO_32, gen_helper_sme2_smopa2_s)
+TRANS_FEAT(UMOPA2_s, aa64_sme2, do_outprod, a, MO_32, gen_helper_sme2_umopa2_s)
+
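+/*
+ * Expand an n-register operation against a single Zm. If Zm overlaps
+ * one of the destination registers, that element is processed last so
+ * that the shared input is not clobbered before the remaining writes.
+ */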
+static bool do_z2z_n1(DisasContext *s, arg_z2z_en *a, GVecGen3Fn *fn)
+{
+ int esz, dn, vsz, mofs, n;
+ bool overlap = false;
+
+ if (!sme_sm_enabled_check(s)) {
+ return true;
+ }
+
+ esz = a->esz;
+ n = a->n;
+ dn = a->zdn;
+ mofs = vec_full_reg_offset(s, a->zm);
+ vsz = streaming_vec_reg_size(s);
+
+ for (int i = 0; i < n; i++) {
+ int dofs = vec_full_reg_offset(s, dn + i);
+ if (dofs == mofs) {
+ overlap = true;
+ } else {
+ fn(esz, dofs, dofs, mofs, vsz, vsz);
+ }
+ }
+ if (overlap) {
+ fn(esz, mofs, mofs, mofs, vsz, vsz);
+ }
+ return true;
+}
+
+static void gen_sme2_srshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_3 * const fns[] = {
+ gen_helper_gvec_srshl_b, gen_helper_sme2_srshl_h,
+ gen_helper_sme2_srshl_s, gen_helper_sme2_srshl_d,
+ };
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, 0, fns[vece]);
+}
+
+static void gen_sme2_urshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+ uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+ static gen_helper_gvec_3 * const fns[] = {
+ gen_helper_gvec_urshl_b, gen_helper_sme2_urshl_h,
+ gen_helper_sme2_urshl_s, gen_helper_sme2_urshl_d,
+ };
+ tcg_debug_assert(vece <= MO_64);
+ tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, 0, fns[vece]);
+}
+
+TRANS_FEAT(ADD_n1, aa64_sme2, do_z2z_n1, a, tcg_gen_gvec_add)
+TRANS_FEAT(SMAX_n1, aa64_sme2, do_z2z_n1, a, tcg_gen_gvec_smax)
+TRANS_FEAT(SMIN_n1, aa64_sme2, do_z2z_n1, a, tcg_gen_gvec_smin)
+TRANS_FEAT(UMAX_n1, aa64_sme2, do_z2z_n1, a, tcg_gen_gvec_umax)
+TRANS_FEAT(UMIN_n1, aa64_sme2, do_z2z_n1, a, tcg_gen_gvec_umin)
+TRANS_FEAT(SRSHL_n1, aa64_sme2, do_z2z_n1, a, gen_sme2_srshl)
+TRANS_FEAT(URSHL_n1, aa64_sme2, do_z2z_n1, a, gen_sme2_urshl)
+TRANS_FEAT(SQDMULH_n1, aa64_sme2, do_z2z_n1, a, gen_gvec_sve2_sqdmulh)
+
+static bool do_z2z_nn(DisasContext *s, arg_z2z_en *a, GVecGen3Fn *fn)
+{
+ int esz, dn, dm, vsz, n;
+
+ if (!sme_sm_enabled_check(s)) {
+ return true;
+ }
+
+ esz = a->esz;
+ n = a->n;
+ dn = a->zdn;
+ dm = a->zm;
+ vsz = streaming_vec_reg_size(s);
+
+ for (int i = 0; i < n; i++) {
+ int dofs = vec_full_reg_offset(s, dn + i);
+ int mofs = vec_full_reg_offset(s, dm + i);
+
+ fn(esz, dofs, dofs, mofs, vsz, vsz);
+ }
+ return true;
+}
+
+TRANS_FEAT(SMAX_nn, aa64_sme2, do_z2z_nn, a, tcg_gen_gvec_smax)
+TRANS_FEAT(SMIN_nn, aa64_sme2, do_z2z_nn, a, tcg_gen_gvec_smin)
+TRANS_FEAT(UMAX_nn, aa64_sme2, do_z2z_nn, a, tcg_gen_gvec_umax)
+TRANS_FEAT(UMIN_nn, aa64_sme2, do_z2z_nn, a, tcg_gen_gvec_umin)
+TRANS_FEAT(SRSHL_nn, aa64_sme2, do_z2z_nn, a, gen_sme2_srshl)
+TRANS_FEAT(URSHL_nn, aa64_sme2, do_z2z_nn, a, gen_sme2_urshl)
+TRANS_FEAT(SQDMULH_nn, aa64_sme2, do_z2z_nn, a, gen_gvec_sve2_sqdmulh)
+
+static bool do_z2z_n1_fpst(DisasContext *s, arg_z2z_en *a,
+ gen_helper_gvec_3_ptr * const fns[4])
+{
+ int esz = a->esz, n, dn, vsz, mofs;
+ bool overlap = false;
+ gen_helper_gvec_3_ptr *fn;
+ TCGv_ptr fpst;
+
+ /* These insns use MO_8 to encode BFloat16. */
+ if (esz == MO_8 && !dc_isar_feature(aa64_sme_b16b16, s)) {
+ return false;
+ }
+ if (!sme_sm_enabled_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(esz == MO_16 ? FPST_A64_F16 : FPST_A64);
+ fn = fns[esz];
+ n = a->n;
+ dn = a->zdn;
+ mofs = vec_full_reg_offset(s, a->zm);
+ vsz = streaming_vec_reg_size(s);
+
+ for (int i = 0; i < n; i++) {
+ int dofs = vec_full_reg_offset(s, dn + i);
+ if (dofs == mofs) {
+ overlap = true;
+ } else {
+ tcg_gen_gvec_3_ptr(dofs, dofs, mofs, fpst, vsz, vsz, 0, fn);
+ }
+ }
+ if (overlap) {
+ tcg_gen_gvec_3_ptr(mofs, mofs, mofs, fpst, vsz, vsz, 0, fn);
+ }
+ return true;
+}
+
+static bool do_z2z_nn_fpst(DisasContext *s, arg_z2z_en *a,
+ gen_helper_gvec_3_ptr * const fns[4])
+{
+ int esz = a->esz, n, dn, dm, vsz;
+ gen_helper_gvec_3_ptr *fn;
+ TCGv_ptr fpst;
+
+ if (esz == MO_8 && !dc_isar_feature(aa64_sme_b16b16, s)) {
+ return false;
+ }
+ if (!sme_sm_enabled_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(esz == MO_16 ? FPST_A64_F16 : FPST_A64);
+ fn = fns[esz];
+ n = a->n;
+ dn = a->zdn;
+ dm = a->zm;
+ vsz = streaming_vec_reg_size(s);
+
+ for (int i = 0; i < n; i++) {
+ int dofs = vec_full_reg_offset(s, dn + i);
+ int mofs = vec_full_reg_offset(s, dm + i);
+
+ tcg_gen_gvec_3_ptr(dofs, dofs, mofs, fpst, vsz, vsz, 0, fn);
+ }
+ return true;
+}
+
+static gen_helper_gvec_3_ptr * const f_vector_fmax[2][4] = {
+ { gen_helper_gvec_fmax_b16,
+ gen_helper_gvec_fmax_h,
+ gen_helper_gvec_fmax_s,
+ gen_helper_gvec_fmax_d },
+ { gen_helper_gvec_ah_fmax_b16,
+ gen_helper_gvec_ah_fmax_h,
+ gen_helper_gvec_ah_fmax_s,
+ gen_helper_gvec_ah_fmax_d },
+};
+TRANS_FEAT(FMAX_n1, aa64_sme2, do_z2z_n1_fpst, a, f_vector_fmax[s->fpcr_ah])
+TRANS_FEAT(FMAX_nn, aa64_sme2, do_z2z_nn_fpst, a, f_vector_fmax[s->fpcr_ah])
+
+static gen_helper_gvec_3_ptr * const f_vector_fmin[2][4] = {
+ { gen_helper_gvec_fmin_b16,
+ gen_helper_gvec_fmin_h,
+ gen_helper_gvec_fmin_s,
+ gen_helper_gvec_fmin_d },
+ { gen_helper_gvec_ah_fmin_b16,
+ gen_helper_gvec_ah_fmin_h,
+ gen_helper_gvec_ah_fmin_s,
+ gen_helper_gvec_ah_fmin_d },
+};
+TRANS_FEAT(FMIN_n1, aa64_sme2, do_z2z_n1_fpst, a, f_vector_fmin[s->fpcr_ah])
+TRANS_FEAT(FMIN_nn, aa64_sme2, do_z2z_nn_fpst, a, f_vector_fmin[s->fpcr_ah])
+
+static gen_helper_gvec_3_ptr * const f_vector_fmaxnm[4] = {
+ gen_helper_gvec_fmaxnum_b16,
+ gen_helper_gvec_fmaxnum_h,
+ gen_helper_gvec_fmaxnum_s,
+ gen_helper_gvec_fmaxnum_d,
+};
+TRANS_FEAT(FMAXNM_n1, aa64_sme2, do_z2z_n1_fpst, a, f_vector_fmaxnm)
+TRANS_FEAT(FMAXNM_nn, aa64_sme2, do_z2z_nn_fpst, a, f_vector_fmaxnm)
+
+static gen_helper_gvec_3_ptr * const f_vector_fminnm[4] = {
+ gen_helper_gvec_fminnum_b16,
+ gen_helper_gvec_fminnum_h,
+ gen_helper_gvec_fminnum_s,
+ gen_helper_gvec_fminnum_d,
+};
+TRANS_FEAT(FMINNM_n1, aa64_sme2, do_z2z_n1_fpst, a, f_vector_fminnm)
+TRANS_FEAT(FMINNM_nn, aa64_sme2, do_z2z_nn_fpst, a, f_vector_fminnm)
+
+/* Add/Sub vector Z[m] to each Z[n*N] with result in ZA[d*N]. */
+static bool do_azz_n1(DisasContext *s, arg_azz_n *a, int esz,
+ GVecGen3FnVar *fn)
+{
+ TCGv_ptr t_za;
+ int svl, n, o_zm;
+
+ if (!sme_smza_enabled_check(s)) {
+ return true;
+ }
+
+ n = a->n;
+ t_za = get_zarray(s, a->rv, a->off, n, 0);
+ o_zm = vec_full_reg_offset(s, a->zm);
+ svl = streaming_vec_reg_size(s);
+
+ for (int i = 0; i < n; ++i) {
+ int o_za = (svl / n * sizeof(ARMVectorReg)) * i;
+ int o_zn = vec_full_reg_offset(s, (a->zn + i) % 32);
+
+ fn(esz, t_za, o_za, tcg_env, o_zn, tcg_env, o_zm, svl, svl);
+ }
+ return true;
+}
+
+TRANS_FEAT(ADD_azz_n1_s, aa64_sme2, do_azz_n1, a, MO_32, tcg_gen_gvec_add_var)
+TRANS_FEAT(SUB_azz_n1_s, aa64_sme2, do_azz_n1, a, MO_32, tcg_gen_gvec_sub_var)
+TRANS_FEAT(ADD_azz_n1_d, aa64_sme2_i16i64, do_azz_n1, a, MO_64, tcg_gen_gvec_add_var)
+TRANS_FEAT(SUB_azz_n1_d, aa64_sme2_i16i64, do_azz_n1, a, MO_64, tcg_gen_gvec_sub_var)
+
+/* Add/Sub each vector Z[m*N] to each Z[n*N] with result in ZA[d*N]. */
+static bool do_azz_nn(DisasContext *s, arg_azz_n *a, int esz,
+ GVecGen3FnVar *fn)
+{
+ TCGv_ptr t_za;
+ int svl, n;
+
+ if (!sme_smza_enabled_check(s)) {
+ return true;
+ }
+
+ n = a->n;
+ t_za = get_zarray(s, a->rv, a->off, n, 1);
+ svl = streaming_vec_reg_size(s);
+
+ for (int i = 0; i < n; ++i) {
+ int o_za = (svl / n * sizeof(ARMVectorReg)) * i;
+ int o_zn = vec_full_reg_offset(s, a->zn + i);
+ int o_zm = vec_full_reg_offset(s, a->zm + i);
+
+ fn(esz, t_za, o_za, tcg_env, o_zn, tcg_env, o_zm, svl, svl);
+ }
+ return true;
+}
+
+TRANS_FEAT(ADD_azz_nn_s, aa64_sme2, do_azz_nn, a, MO_32, tcg_gen_gvec_add_var)
+TRANS_FEAT(SUB_azz_nn_s, aa64_sme2, do_azz_nn, a, MO_32, tcg_gen_gvec_sub_var)
+TRANS_FEAT(ADD_azz_nn_d, aa64_sme2_i16i64, do_azz_nn, a, MO_64, tcg_gen_gvec_add_var)
+TRANS_FEAT(SUB_azz_nn_d, aa64_sme2_i16i64, do_azz_nn, a, MO_64, tcg_gen_gvec_sub_var)
+
+/* Add/Sub each vector Z[m*N] into the accumulator ZA[d*N]. */
+static bool do_aaz(DisasContext *s, arg_az_n *a, int esz, GVecGen3FnVar *fn)
+{
+ TCGv_ptr t_za;
+ int svl, n;
+
+ if (!sme_smza_enabled_check(s)) {
+ return true;
+ }
+
+ n = a->n;
+ t_za = get_zarray(s, a->rv, a->off, n, 0);
+ svl = streaming_vec_reg_size(s);
+
+ for (int i = 0; i < n; ++i) {
+ int o_za = (svl / n * sizeof(ARMVectorReg)) * i;
+ int o_zm = vec_full_reg_offset(s, a->zm + i);
+
+ fn(esz, t_za, o_za, t_za, o_za, tcg_env, o_zm, svl, svl);
+ }
+ return true;
+}
+
+TRANS_FEAT(ADD_aaz_s, aa64_sme2, do_aaz, a, MO_32, tcg_gen_gvec_add_var)
+TRANS_FEAT(SUB_aaz_s, aa64_sme2, do_aaz, a, MO_32, tcg_gen_gvec_sub_var)
+TRANS_FEAT(ADD_aaz_d, aa64_sme2_i16i64, do_aaz, a, MO_64, tcg_gen_gvec_add_var)
+TRANS_FEAT(SUB_aaz_d, aa64_sme2_i16i64, do_aaz, a, MO_64, tcg_gen_gvec_sub_var)
+
+/*
+ * Expand array multi-vector single (n1), array multi-vector (nn),
+ * and array multi-vector indexed (nx), for floating-point accumulate.
+ * multi: true for nn, false for n1.
+ * fpst: >= 0 selects an FPST_* status pointer; < 0 (FPST_ENV) passes tcg_env.
+ * data: value passed to simd_data, including any index.
+ */
+#define FPST_ENV -1
+
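+/*
+ * For example, do_fmlal below passes data = (1 << 2) | sub with
+ * shsel = 1, so inner iteration i contributes
+ * simd_data = (1 << 2) | sub | (i << 1).
+ */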
+static bool do_azz_fp(DisasContext *s, int nreg, int nsel,
+ int rv, int off, int zn, int zm,
+ int data, int shsel, bool multi, int fpst,
+ gen_helper_gvec_3_ptr *fn)
+{
+ if (sme_smza_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ int vstride = svl / nreg;
+ TCGv_ptr t_za = get_zarray(s, rv, off, nreg, nsel);
+ TCGv_ptr t, ptr;
+
+ if (fpst >= 0) {
+ ptr = fpstatus_ptr(fpst);
+ } else {
+ ptr = tcg_env;
+ }
+ t = tcg_temp_new_ptr();
+
+ for (int r = 0; r < nreg; ++r) {
+ TCGv_ptr t_zn = vec_full_reg_ptr(s, zn);
+ TCGv_ptr t_zm = vec_full_reg_ptr(s, zm);
+
+ for (int i = 0; i < nsel; ++i) {
+ int o_za = (r * vstride + i) * sizeof(ARMVectorReg);
+ int desc = simd_desc(svl, svl, data | (i << shsel));
+
+ tcg_gen_addi_ptr(t, t_za, o_za);
+ fn(t, t_zn, t_zm, ptr, tcg_constant_i32(desc));
+ }
+
+ /*
+ * For multiple-and-single vectors, Zn may wrap.
+ * For multiple vectors, both Zn and Zm are aligned.
+ */
+ zn = (zn + 1) % 32;
+ zm += multi;
+ }
+ }
+ return true;
+}
+
+static bool do_azz_acc_fp(DisasContext *s, int nreg, int nsel,
+ int rv, int off, int zn, int zm,
+ int data, int shsel, bool multi, int fpst,
+ gen_helper_gvec_4_ptr *fn)
+{
+ if (sme_smza_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ int vstride = svl / nreg;
+ TCGv_ptr t_za = get_zarray(s, rv, off, nreg, nsel);
+ TCGv_ptr t, ptr;
+
+ if (fpst >= 0) {
+ ptr = fpstatus_ptr(fpst);
+ } else {
+ ptr = tcg_env;
+ }
+ t = tcg_temp_new_ptr();
+
+ for (int r = 0; r < nreg; ++r) {
+ TCGv_ptr t_zn = vec_full_reg_ptr(s, zn);
+ TCGv_ptr t_zm = vec_full_reg_ptr(s, zm);
+
+ for (int i = 0; i < nsel; ++i) {
+ int o_za = (r * vstride + i) * sizeof(ARMVectorReg);
+ int desc = simd_desc(svl, svl, data | (i << shsel));
+
+ tcg_gen_addi_ptr(t, t_za, o_za);
+ fn(t, t_zn, t_zm, t, ptr, tcg_constant_i32(desc));
+ }
+
+ /*
+ * For multiple-and-single vectors, Zn may wrap.
+ * For multiple vectors, both Zn and Zm are aligned.
+ */
+ zn = (zn + 1) % 32;
+ zm += multi;
+ }
+ }
+ return true;
+}
+
+static bool do_fmlal(DisasContext *s, arg_azz_n *a, bool sub, bool multi)
+{
+ return do_azz_acc_fp(s, a->n, 2, a->rv, a->off, a->zn, a->zm,
+ (1 << 2) | sub, 1,
+ multi, FPST_ENV, gen_helper_sve2_fmlal_zzzw_s);
+}
+
+TRANS_FEAT(FMLAL_n1, aa64_sme2, do_fmlal, a, false, false)
+TRANS_FEAT(FMLSL_n1, aa64_sme2, do_fmlal, a, true, false)
+TRANS_FEAT(FMLAL_nn, aa64_sme2, do_fmlal, a, false, true)
+TRANS_FEAT(FMLSL_nn, aa64_sme2, do_fmlal, a, true, true)
+
+static bool do_fmlal_nx(DisasContext *s, arg_azx_n *a, bool sub)
+{
+ return do_azz_acc_fp(s, a->n, 2, a->rv, a->off, a->zn, a->zm,
+ (a->idx << 3) | (1 << 2) | sub, 1,
+ false, FPST_ENV, gen_helper_sve2_fmlal_zzxw_s);
+}
+
+TRANS_FEAT(FMLAL_nx, aa64_sme2, do_fmlal_nx, a, false)
+TRANS_FEAT(FMLSL_nx, aa64_sme2, do_fmlal_nx, a, true)
+
+static bool do_bfmlal(DisasContext *s, arg_azz_n *a, bool sub, bool multi)
+{
+ return do_azz_acc_fp(s, a->n, 2, a->rv, a->off, a->zn, a->zm,
+ 0, 0, multi, FPST_ZA,
+ (!sub ? gen_helper_gvec_bfmlal
+ : s->fpcr_ah ? gen_helper_gvec_ah_bfmlsl
+ : gen_helper_gvec_bfmlsl));
+}
+
+TRANS_FEAT(BFMLAL_n1, aa64_sme2, do_bfmlal, a, false, false)
+TRANS_FEAT(BFMLSL_n1, aa64_sme2, do_bfmlal, a, true, false)
+TRANS_FEAT(BFMLAL_nn, aa64_sme2, do_bfmlal, a, false, true)
+TRANS_FEAT(BFMLSL_nn, aa64_sme2, do_bfmlal, a, true, true)
+
+static bool do_bfmlal_nx(DisasContext *s, arg_azx_n *a, bool sub)
+{
+ return do_azz_acc_fp(s, a->n, 2, a->rv, a->off, a->zn, a->zm,
+ a->idx << 1, 0, false, FPST_ZA,
+ !sub ? gen_helper_gvec_bfmlal_idx
+ : s->fpcr_ah ? gen_helper_gvec_ah_bfmlsl_idx
+ : gen_helper_gvec_bfmlsl_idx);
+}
+
+TRANS_FEAT(BFMLAL_nx, aa64_sme2, do_bfmlal_nx, a, false)
+TRANS_FEAT(BFMLSL_nx, aa64_sme2, do_bfmlal_nx, a, true)
+
+static bool do_fdot(DisasContext *s, arg_azz_n *a, bool multi)
+{
+ return do_azz_acc_fp(s, a->n, 1, a->rv, a->off, a->zn, a->zm, 1, 0,
+ multi, FPST_ENV, gen_helper_sme2_fdot_h);
+}
+
+TRANS_FEAT(FDOT_n1, aa64_sme2, do_fdot, a, false)
+TRANS_FEAT(FDOT_nn, aa64_sme2, do_fdot, a, true)
+
+static bool do_fdot_nx(DisasContext *s, arg_azx_n *a)
+{
+ return do_azz_acc_fp(s, a->n, 1, a->rv, a->off, a->zn, a->zm,
+ a->idx | (1 << 2), 0, false, FPST_ENV,
+ gen_helper_sme2_fdot_idx_h);
+}
+
+TRANS_FEAT(FDOT_nx, aa64_sme2, do_fdot_nx, a)
+
+static bool do_bfdot(DisasContext *s, arg_azz_n *a, bool multi)
+{
+ return do_azz_acc_fp(s, a->n, 1, a->rv, a->off, a->zn, a->zm, 0, 0,
+ multi, FPST_ENV, gen_helper_gvec_bfdot);
+}
+
+TRANS_FEAT(BFDOT_n1, aa64_sme2, do_bfdot, a, false)
+TRANS_FEAT(BFDOT_nn, aa64_sme2, do_bfdot, a, true)
+
+static bool do_bfdot_nx(DisasContext *s, arg_azx_n *a)
+{
+ return do_azz_acc_fp(s, a->n, 1, a->rv, a->off, a->zn, a->zm, a->idx, 0,
+ false, FPST_ENV, gen_helper_gvec_bfdot_idx);
+}
+
+TRANS_FEAT(BFDOT_nx, aa64_sme2, do_bfdot_nx, a)
+
+static bool do_vdot(DisasContext *s, arg_azx_n *a, gen_helper_gvec_4_ptr *fn)
+{
+ if (sme_smza_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ int vstride = svl / 2;
+ TCGv_ptr t_za = get_zarray(s, a->rv, a->off, 2, 1);
+ TCGv_ptr t_zn = vec_full_reg_ptr(s, a->zn);
+ TCGv_ptr t_zm = vec_full_reg_ptr(s, a->zm);
+ TCGv_ptr t = tcg_temp_new_ptr();
+
+ for (int i = 0; i < 2; ++i) {
+ int o_za = i * vstride * sizeof(ARMVectorReg);
+ int desc = simd_desc(svl, svl, a->idx | (i << 2));
+
+ tcg_gen_addi_ptr(t, t_za, o_za);
+ fn(t, t_zn, t_zm, t, tcg_env, tcg_constant_i32(desc));
+ }
+ }
+ return true;
+}
+
+TRANS_FEAT(FVDOT, aa64_sme, do_vdot, a, gen_helper_sme2_fvdot_idx_h)
+TRANS_FEAT(BFVDOT, aa64_sme, do_vdot, a, gen_helper_sme2_bfvdot_idx)
+
+static bool do_fmla(DisasContext *s, arg_azz_n *a, bool multi,
+ ARMFPStatusFlavour fpst, gen_helper_gvec_3_ptr *fn)
+{
+ return do_azz_fp(s, a->n, 1, a->rv, a->off, a->zn, a->zm,
+ 0, 0, multi, fpst, fn);
+}
+
+TRANS_FEAT(FMLA_n1_h, aa64_sme_f16f16, do_fmla, a, false, FPST_ZA_F16,
+ gen_helper_gvec_vfma_h)
+TRANS_FEAT(FMLS_n1_h, aa64_sme_f16f16, do_fmla, a, false, FPST_ZA_F16,
+ s->fpcr_ah ? gen_helper_gvec_ah_vfms_h : gen_helper_gvec_vfms_h)
+TRANS_FEAT(FMLA_nn_h, aa64_sme_f16f16, do_fmla, a, true, FPST_ZA_F16,
+ gen_helper_gvec_vfma_h)
+TRANS_FEAT(FMLS_nn_h, aa64_sme_f16f16, do_fmla, a, true, FPST_ZA_F16,
+ s->fpcr_ah ? gen_helper_gvec_ah_vfms_h : gen_helper_gvec_vfms_h)
+
+TRANS_FEAT(FMLA_n1_s, aa64_sme2, do_fmla, a, false, FPST_ZA,
+ gen_helper_gvec_vfma_s)
+TRANS_FEAT(FMLS_n1_s, aa64_sme2, do_fmla, a, false, FPST_ZA,
+ s->fpcr_ah ? gen_helper_gvec_ah_vfms_s : gen_helper_gvec_vfms_s)
+TRANS_FEAT(FMLA_nn_s, aa64_sme2, do_fmla, a, true, FPST_ZA,
+ gen_helper_gvec_vfma_s)
+TRANS_FEAT(FMLS_nn_s, aa64_sme2, do_fmla, a, true, FPST_ZA,
+ s->fpcr_ah ? gen_helper_gvec_ah_vfms_s : gen_helper_gvec_vfms_s)
+
+TRANS_FEAT(FMLA_n1_d, aa64_sme2_f64f64, do_fmla, a, false, FPST_ZA,
+ gen_helper_gvec_vfma_d)
+TRANS_FEAT(FMLS_n1_d, aa64_sme2_f64f64, do_fmla, a, false, FPST_ZA,
+ s->fpcr_ah ? gen_helper_gvec_ah_vfms_d : gen_helper_gvec_vfms_d)
+TRANS_FEAT(FMLA_nn_d, aa64_sme2_f64f64, do_fmla, a, true, FPST_ZA,
+ gen_helper_gvec_vfma_d)
+TRANS_FEAT(FMLS_nn_d, aa64_sme2_f64f64, do_fmla, a, true, FPST_ZA,
+ s->fpcr_ah ? gen_helper_gvec_ah_vfms_d : gen_helper_gvec_vfms_d)
+
+TRANS_FEAT(BFMLA_n1, aa64_sme_b16b16, do_fmla, a, false, FPST_ZA,
+ gen_helper_gvec_bfmla)
+TRANS_FEAT(BFMLS_n1, aa64_sme_b16b16, do_fmla, a, false, FPST_ZA,
+ s->fpcr_ah ? gen_helper_gvec_ah_bfmls : gen_helper_gvec_bfmls)
+TRANS_FEAT(BFMLA_nn, aa64_sme_b16b16, do_fmla, a, true, FPST_ZA,
+ gen_helper_gvec_bfmla)
+TRANS_FEAT(BFMLS_nn, aa64_sme_b16b16, do_fmla, a, true, FPST_ZA,
+ s->fpcr_ah ? gen_helper_gvec_ah_bfmls : gen_helper_gvec_bfmls)
+
+static bool do_fmla_nx(DisasContext *s, arg_azx_n *a,
+ ARMFPStatusFlavour fpst, gen_helper_gvec_4_ptr *fn)
+{
+ return do_azz_acc_fp(s, a->n, 1, a->rv, a->off, a->zn, a->zm,
+ a->idx, 0, false, fpst, fn);
+}
+
+TRANS_FEAT(FMLA_nx_h, aa64_sme_f16f16, do_fmla_nx, a, FPST_ZA_F16,
+ gen_helper_gvec_fmla_idx_h)
+TRANS_FEAT(FMLS_nx_h, aa64_sme_f16f16, do_fmla_nx, a, FPST_ZA_F16,
+ s->fpcr_ah ? gen_helper_gvec_ah_fmls_idx_h : gen_helper_gvec_fmls_idx_h)
+TRANS_FEAT(FMLA_nx_s, aa64_sme2, do_fmla_nx, a, FPST_ZA,
+ gen_helper_gvec_fmla_idx_s)
+TRANS_FEAT(FMLS_nx_s, aa64_sme2, do_fmla_nx, a, FPST_ZA,
+ s->fpcr_ah ? gen_helper_gvec_ah_fmls_idx_s : gen_helper_gvec_fmls_idx_s)
+TRANS_FEAT(FMLA_nx_d, aa64_sme2_f64f64, do_fmla_nx, a, FPST_ZA,
+ gen_helper_gvec_fmla_idx_d)
+TRANS_FEAT(FMLS_nx_d, aa64_sme2_f64f64, do_fmla_nx, a, FPST_ZA,
+ s->fpcr_ah ? gen_helper_gvec_ah_fmls_idx_d : gen_helper_gvec_fmls_idx_d)
+
+TRANS_FEAT(BFMLA_nx, aa64_sme_b16b16, do_fmla_nx, a, FPST_ZA,
+ gen_helper_gvec_bfmla_idx)
+TRANS_FEAT(BFMLS_nx, aa64_sme_b16b16, do_fmla_nx, a, FPST_ZA,
+ s->fpcr_ah ? gen_helper_gvec_ah_bfmls_idx : gen_helper_gvec_bfmls_idx)
+
+static bool do_faddsub(DisasContext *s, arg_az_n *a, ARMFPStatusFlavour fpst,
+ gen_helper_gvec_3_ptr *fn)
+{
+ if (sme_smza_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ int n = a->n;
+ int zm = a->zm;
+ int vstride = svl / n;
+ TCGv_ptr t_za = get_zarray(s, a->rv, a->off, n, 0);
+ TCGv_ptr ptr = fpstatus_ptr(fpst);
+ TCGv_ptr t = tcg_temp_new_ptr();
+
+ for (int r = 0; r < n; ++r) {
+ TCGv_ptr t_zm = vec_full_reg_ptr(s, zm + r);
+ int o_za = r * vstride * sizeof(ARMVectorReg);
+ int desc = simd_desc(svl, svl, 0);
+
+ tcg_gen_addi_ptr(t, t_za, o_za);
+ fn(t, t, t_zm, ptr, tcg_constant_i32(desc));
+ }
+ }
+ return true;
+}
+
+TRANS_FEAT(FADD_nn_h, aa64_sme_f16f16, do_faddsub, a,
+ FPST_ZA_F16, gen_helper_gvec_fadd_h)
+TRANS_FEAT(FSUB_nn_h, aa64_sme_f16f16, do_faddsub, a,
+ FPST_ZA_F16, gen_helper_gvec_fsub_h)
+
+TRANS_FEAT(FADD_nn_s, aa64_sme2, do_faddsub, a,
+ FPST_ZA, gen_helper_gvec_fadd_s)
+TRANS_FEAT(FSUB_nn_s, aa64_sme2, do_faddsub, a,
+ FPST_ZA, gen_helper_gvec_fsub_s)
+
+TRANS_FEAT(FADD_nn_d, aa64_sme2_f64f64, do_faddsub, a,
+ FPST_ZA, gen_helper_gvec_fadd_d)
+TRANS_FEAT(FSUB_nn_d, aa64_sme2_f64f64, do_faddsub, a,
+ FPST_ZA, gen_helper_gvec_fsub_d)
+
+TRANS_FEAT(BFADD_nn, aa64_sme_b16b16, do_faddsub, a,
+ FPST_ZA, gen_helper_gvec_bfadd)
+TRANS_FEAT(BFSUB_nn, aa64_sme_b16b16, do_faddsub, a,
+ FPST_ZA, gen_helper_gvec_bfsub)
+
+/*
+ * Expand array multi-vector single (n1), array multi-vector (nn),
+ * and array multi-vector indexed (nx), for integer accumulate.
+ * multi: true for nn, false for n1.
+ * data: value passed to simd_data, including any index.
+ */
+static bool do_azz_acc(DisasContext *s, int nreg, int nsel,
+ int rv, int off, int zn, int zm,
+ int data, int shsel, bool multi,
+ gen_helper_gvec_4 *fn)
+{
+ if (sme_smza_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ int vstride = svl / nreg;
+ TCGv_ptr t_za = get_zarray(s, rv, off, nreg, nsel);
+ TCGv_ptr t = tcg_temp_new_ptr();
+
+ for (int r = 0; r < nreg; ++r) {
+ TCGv_ptr t_zn = vec_full_reg_ptr(s, zn);
+ TCGv_ptr t_zm = vec_full_reg_ptr(s, zm);
+
+ for (int i = 0; i < nsel; ++i) {
+ int o_za = (r * vstride + i) * sizeof(ARMVectorReg);
+ int desc = simd_desc(svl, svl, data | (i << shsel));
+
+ tcg_gen_addi_ptr(t, t_za, o_za);
+ fn(t, t_zn, t_zm, t, tcg_constant_i32(desc));
+ }
+
+ /*
+ * For multiple-and-single vectors, Zn may wrap.
+ * For multiple vectors, both Zn and Zm are aligned.
+ */
+ zn = (zn + 1) % 32;
+ zm += multi;
+ }
+ }
+ return true;
+}
+
+static bool do_dot(DisasContext *s, arg_azz_n *a, bool multi,
+ gen_helper_gvec_4 *fn)
+{
+ return do_azz_acc(s, a->n, 1, a->rv, a->off, a->zn, a->zm,
+ 0, 0, multi, fn);
+}
+
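+/* SUDOT is USDOT with the signed and unsigned multiplicands exchanged. */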
+static void gen_helper_gvec_sudot_4b(TCGv_ptr d, TCGv_ptr n, TCGv_ptr m,
+ TCGv_ptr a, TCGv_i32 desc)
+{
+ gen_helper_gvec_usdot_4b(d, m, n, a, desc);
+}
+
+TRANS_FEAT(USDOT_n1, aa64_sme2, do_dot, a, false, gen_helper_gvec_usdot_4b)
+TRANS_FEAT(SUDOT_n1, aa64_sme2, do_dot, a, false, gen_helper_gvec_sudot_4b)
+TRANS_FEAT(SDOT_n1_2h, aa64_sme2, do_dot, a, false, gen_helper_gvec_sdot_2h)
+TRANS_FEAT(UDOT_n1_2h, aa64_sme2, do_dot, a, false, gen_helper_gvec_udot_2h)
+TRANS_FEAT(SDOT_n1_4b, aa64_sme2, do_dot, a, false, gen_helper_gvec_sdot_4b)
+TRANS_FEAT(UDOT_n1_4b, aa64_sme2, do_dot, a, false, gen_helper_gvec_udot_4b)
+TRANS_FEAT(SDOT_n1_4h, aa64_sme2_i16i64, do_dot, a, false, gen_helper_gvec_sdot_4h)
+TRANS_FEAT(UDOT_n1_4h, aa64_sme2_i16i64, do_dot, a, false, gen_helper_gvec_udot_4h)
+
+TRANS_FEAT(USDOT_nn, aa64_sme2, do_dot, a, true, gen_helper_gvec_usdot_4b)
+TRANS_FEAT(SDOT_nn_2h, aa64_sme2, do_dot, a, true, gen_helper_gvec_sdot_2h)
+TRANS_FEAT(UDOT_nn_2h, aa64_sme2, do_dot, a, true, gen_helper_gvec_udot_2h)
+TRANS_FEAT(SDOT_nn_4b, aa64_sme2, do_dot, a, true, gen_helper_gvec_sdot_4b)
+TRANS_FEAT(UDOT_nn_4b, aa64_sme2, do_dot, a, true, gen_helper_gvec_udot_4b)
+TRANS_FEAT(SDOT_nn_4h, aa64_sme2_i16i64, do_dot, a, true, gen_helper_gvec_sdot_4h)
+TRANS_FEAT(UDOT_nn_4h, aa64_sme2_i16i64, do_dot, a, true, gen_helper_gvec_udot_4h)
+
+static bool do_dot_nx(DisasContext *s, arg_azx_n *a, gen_helper_gvec_4 *fn)
+{
+ return do_azz_acc(s, a->n, 1, a->rv, a->off, a->zn, a->zm,
+ a->idx, 0, false, fn);
+}
+
+TRANS_FEAT(USDOT_nx, aa64_sme2, do_dot_nx, a, gen_helper_gvec_usdot_idx_4b)
+TRANS_FEAT(SUDOT_nx, aa64_sme2, do_dot_nx, a, gen_helper_gvec_sudot_idx_4b)
+TRANS_FEAT(SDOT_nx_2h, aa64_sme2, do_dot_nx, a, gen_helper_gvec_sdot_idx_2h)
+TRANS_FEAT(UDOT_nx_2h, aa64_sme2, do_dot_nx, a, gen_helper_gvec_udot_idx_2h)
+TRANS_FEAT(SDOT_nx_4b, aa64_sme2, do_dot_nx, a, gen_helper_gvec_sdot_idx_4b)
+TRANS_FEAT(UDOT_nx_4b, aa64_sme2, do_dot_nx, a, gen_helper_gvec_udot_idx_4b)
+TRANS_FEAT(SDOT_nx_4h, aa64_sme2_i16i64, do_dot_nx, a, gen_helper_gvec_sdot_idx_4h)
+TRANS_FEAT(UDOT_nx_4h, aa64_sme2_i16i64, do_dot_nx, a, gen_helper_gvec_udot_idx_4h)
+
+static bool do_vdot_nx(DisasContext *s, arg_azx_n *a, gen_helper_gvec_3 *fn)
+{
+ if (sme_smza_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ fn(get_zarray(s, a->rv, a->off, a->n, 0),
+ vec_full_reg_ptr(s, a->zn),
+ vec_full_reg_ptr(s, a->zm),
+ tcg_constant_i32(simd_desc(svl, svl, a->idx)));
+ }
+ return true;
+}
+
+TRANS_FEAT(SVDOT_nx_2h, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_svdot_idx_2h)
+TRANS_FEAT(SVDOT_nx_4b, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_svdot_idx_4b)
+TRANS_FEAT(SVDOT_nx_4h, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_svdot_idx_4h)
+
+TRANS_FEAT(UVDOT_nx_2h, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_uvdot_idx_2h)
+TRANS_FEAT(UVDOT_nx_4b, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_uvdot_idx_4b)
+TRANS_FEAT(UVDOT_nx_4h, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_uvdot_idx_4h)
+
+TRANS_FEAT(SUVDOT_nx_4b, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_suvdot_idx_4b)
+TRANS_FEAT(USVDOT_nx_4b, aa64_sme2, do_vdot_nx, a, gen_helper_sme2_usvdot_idx_4b)
+
+static bool do_smlal(DisasContext *s, arg_azz_n *a, bool multi,
+ gen_helper_gvec_4 *fn)
+{
+ return do_azz_acc(s, a->n, 2, a->rv, a->off, a->zn, a->zm,
+ 0, 0, multi, fn);
+}
+
+TRANS_FEAT(SMLAL_n1, aa64_sme2, do_smlal, a, false, gen_helper_sve2_smlal_zzzw_s)
+TRANS_FEAT(SMLSL_n1, aa64_sme2, do_smlal, a, false, gen_helper_sve2_smlsl_zzzw_s)
+TRANS_FEAT(UMLAL_n1, aa64_sme2, do_smlal, a, false, gen_helper_sve2_umlal_zzzw_s)
+TRANS_FEAT(UMLSL_n1, aa64_sme2, do_smlal, a, false, gen_helper_sve2_umlsl_zzzw_s)
+
+TRANS_FEAT(SMLAL_nn, aa64_sme2, do_smlal, a, true, gen_helper_sve2_smlal_zzzw_s)
+TRANS_FEAT(SMLSL_nn, aa64_sme2, do_smlal, a, true, gen_helper_sve2_smlsl_zzzw_s)
+TRANS_FEAT(UMLAL_nn, aa64_sme2, do_smlal, a, true, gen_helper_sve2_umlal_zzzw_s)
+TRANS_FEAT(UMLSL_nn, aa64_sme2, do_smlal, a, true, gen_helper_sve2_umlsl_zzzw_s)
+
+static bool do_smlal_nx(DisasContext *s, arg_azx_n *a,
+ gen_helper_gvec_4 *fn)
+{
+ return do_azz_acc(s, a->n, 2, a->rv, a->off, a->zn, a->zm,
+ a->idx << 1, 0, false, fn);
+}
+
+TRANS_FEAT(SMLAL_nx, aa64_sme2, do_smlal_nx, a, gen_helper_sve2_smlal_idx_s)
+TRANS_FEAT(SMLSL_nx, aa64_sme2, do_smlal_nx, a, gen_helper_sve2_smlsl_idx_s)
+TRANS_FEAT(UMLAL_nx, aa64_sme2, do_smlal_nx, a, gen_helper_sve2_umlal_idx_s)
+TRANS_FEAT(UMLSL_nx, aa64_sme2, do_smlal_nx, a, gen_helper_sve2_umlsl_idx_s)
+
+static bool do_smlall(DisasContext *s, arg_azz_n *a, bool multi,
+ gen_helper_gvec_4 *fn)
+{
+ return do_azz_acc(s, a->n, 4, a->rv, a->off, a->zn, a->zm,
+ 0, 0, multi, fn);
+}
+
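+/* As with SUDOT above, SUMLALL is USMLALL with n and m exchanged. */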
+static void gen_helper_sme2_sumlall_s(TCGv_ptr d, TCGv_ptr n, TCGv_ptr m,
+ TCGv_ptr a, TCGv_i32 desc)
+{
+ gen_helper_sme2_usmlall_s(d, m, n, a, desc);
+}
+
+TRANS_FEAT(SMLALL_n1_s, aa64_sme2, do_smlall, a, false, gen_helper_sme2_smlall_s)
+TRANS_FEAT(SMLSLL_n1_s, aa64_sme2, do_smlall, a, false, gen_helper_sme2_smlsll_s)
+TRANS_FEAT(UMLALL_n1_s, aa64_sme2, do_smlall, a, false, gen_helper_sme2_umlall_s)
+TRANS_FEAT(UMLSLL_n1_s, aa64_sme2, do_smlall, a, false, gen_helper_sme2_umlsll_s)
+TRANS_FEAT(USMLALL_n1_s, aa64_sme2, do_smlall, a, false, gen_helper_sme2_usmlall_s)
+TRANS_FEAT(SUMLALL_n1_s, aa64_sme2, do_smlall, a, false, gen_helper_sme2_sumlall_s)
+
+TRANS_FEAT(SMLALL_n1_d, aa64_sme2_i16i64, do_smlall, a, false, gen_helper_sme2_smlall_d)
+TRANS_FEAT(SMLSLL_n1_d, aa64_sme2_i16i64, do_smlall, a, false, gen_helper_sme2_smlsll_d)
+TRANS_FEAT(UMLALL_n1_d, aa64_sme2_i16i64, do_smlall, a, false, gen_helper_sme2_umlall_d)
+TRANS_FEAT(UMLSLL_n1_d, aa64_sme2_i16i64, do_smlall, a, false, gen_helper_sme2_umlsll_d)
+
+TRANS_FEAT(SMLALL_nn_s, aa64_sme2, do_smlall, a, true, gen_helper_sme2_smlall_s)
+TRANS_FEAT(SMLSLL_nn_s, aa64_sme2, do_smlall, a, true, gen_helper_sme2_smlsll_s)
+TRANS_FEAT(UMLALL_nn_s, aa64_sme2, do_smlall, a, true, gen_helper_sme2_umlall_s)
+TRANS_FEAT(UMLSLL_nn_s, aa64_sme2, do_smlall, a, true, gen_helper_sme2_umlsll_s)
+TRANS_FEAT(USMLALL_nn_s, aa64_sme2, do_smlall, a, true, gen_helper_sme2_usmlall_s)
+
+TRANS_FEAT(SMLALL_nn_d, aa64_sme2_i16i64, do_smlall, a, true, gen_helper_sme2_smlall_d)
+TRANS_FEAT(SMLSLL_nn_d, aa64_sme2_i16i64, do_smlall, a, true, gen_helper_sme2_smlsll_d)
+TRANS_FEAT(UMLALL_nn_d, aa64_sme2_i16i64, do_smlall, a, true, gen_helper_sme2_umlall_d)
+TRANS_FEAT(UMLSLL_nn_d, aa64_sme2_i16i64, do_smlall, a, true, gen_helper_sme2_umlsll_d)
+
+static bool do_smlall_nx(DisasContext *s, arg_azx_n *a,
+ gen_helper_gvec_4 *fn)
+{
+ return do_azz_acc(s, a->n, 4, a->rv, a->off, a->zn, a->zm,
+ a->idx << 2, 0, false, fn);
+}
+
+TRANS_FEAT(SMLALL_nx_s, aa64_sme2, do_smlall_nx, a, gen_helper_sme2_smlall_idx_s)
+TRANS_FEAT(SMLSLL_nx_s, aa64_sme2, do_smlall_nx, a, gen_helper_sme2_smlsll_idx_s)
+TRANS_FEAT(UMLALL_nx_s, aa64_sme2, do_smlall_nx, a, gen_helper_sme2_umlall_idx_s)
+TRANS_FEAT(UMLSLL_nx_s, aa64_sme2, do_smlall_nx, a, gen_helper_sme2_umlsll_idx_s)
+TRANS_FEAT(USMLALL_nx_s, aa64_sme2, do_smlall_nx, a, gen_helper_sme2_usmlall_idx_s)
+TRANS_FEAT(SUMLALL_nx_s, aa64_sme2, do_smlall_nx, a, gen_helper_sme2_sumlall_idx_s)
+
+TRANS_FEAT(SMLALL_nx_d, aa64_sme2_i16i64, do_smlall_nx, a, gen_helper_sme2_smlall_idx_d)
+TRANS_FEAT(SMLSLL_nx_d, aa64_sme2_i16i64, do_smlall_nx, a, gen_helper_sme2_smlsll_idx_d)
+TRANS_FEAT(UMLALL_nx_d, aa64_sme2_i16i64, do_smlall_nx, a, gen_helper_sme2_umlall_idx_d)
+TRANS_FEAT(UMLSLL_nx_d, aa64_sme2_i16i64, do_smlall_nx, a, gen_helper_sme2_umlsll_idx_d)
+
+static bool do_zz_fpst(DisasContext *s, arg_zz_n *a, int data,
+ ARMFPStatusFlavour type, gen_helper_gvec_2_ptr *fn)
+{
+ if (sme_sm_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ TCGv_ptr fpst = fpstatus_ptr(type);
+
+ for (int i = 0, n = a->n; i < n; ++i) {
+ tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, a->zd + i),
+ vec_full_reg_offset(s, a->zn + i),
+ fpst, svl, svl, data, fn);
+ }
+ }
+ return true;
+}
+
+TRANS_FEAT(BFCVT, aa64_sme2, do_zz_fpst, a, 0,
+ FPST_A64, gen_helper_sme2_bfcvt)
+TRANS_FEAT(BFCVTN, aa64_sme2, do_zz_fpst, a, 0,
+ FPST_A64, gen_helper_sme2_bfcvtn)
+TRANS_FEAT(FCVT_n, aa64_sme2, do_zz_fpst, a, 0,
+ FPST_A64, gen_helper_sme2_fcvt_n)
+TRANS_FEAT(FCVTN, aa64_sme2, do_zz_fpst, a, 0,
+ FPST_A64, gen_helper_sme2_fcvtn)
+
+TRANS_FEAT(FCVT_w, aa64_sme_f16f16, do_zz_fpst, a, 0,
+ FPST_A64_F16, gen_helper_sme2_fcvt_w)
+TRANS_FEAT(FCVTL, aa64_sme_f16f16, do_zz_fpst, a, 0,
+ FPST_A64_F16, gen_helper_sme2_fcvtl)
+
+TRANS_FEAT(FCVTZS, aa64_sme2, do_zz_fpst, a, 0,
+ FPST_A64, gen_helper_gvec_vcvt_rz_fs)
+TRANS_FEAT(FCVTZU, aa64_sme2, do_zz_fpst, a, 0,
+ FPST_A64, gen_helper_gvec_vcvt_rz_fu)
+
+TRANS_FEAT(SCVTF, aa64_sme2, do_zz_fpst, a, 0,
+ FPST_A64, gen_helper_sme2_scvtf)
+TRANS_FEAT(UCVTF, aa64_sme2, do_zz_fpst, a, 0,
+ FPST_A64, gen_helper_sme2_ucvtf)
+
+TRANS_FEAT(FRINTN, aa64_sme2, do_zz_fpst, a, float_round_nearest_even,
+ FPST_A64, gen_helper_gvec_vrint_rm_s)
+TRANS_FEAT(FRINTP, aa64_sme2, do_zz_fpst, a, float_round_up,
+ FPST_A64, gen_helper_gvec_vrint_rm_s)
+TRANS_FEAT(FRINTM, aa64_sme2, do_zz_fpst, a, float_round_down,
+ FPST_A64, gen_helper_gvec_vrint_rm_s)
+TRANS_FEAT(FRINTA, aa64_sme2, do_zz_fpst, a, float_round_ties_away,
+ FPST_A64, gen_helper_gvec_vrint_rm_s)
+
+static bool do_zz(DisasContext *s, arg_zz_n *a, int data,
+ gen_helper_gvec_2 *fn)
+{
+ if (sme_sm_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+
+ for (int i = 0, n = a->n; i < n; ++i) {
+ tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->zd + i),
+ vec_full_reg_offset(s, a->zn + i),
+ svl, svl, data, fn);
+ }
+ }
+ return true;
+}
+
+TRANS_FEAT(SQCVT_sh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvt_sh)
+TRANS_FEAT(UQCVT_sh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvt_sh)
+TRANS_FEAT(SQCVTU_sh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtu_sh)
+
+TRANS_FEAT(SQCVT_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvt_sb)
+TRANS_FEAT(UQCVT_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvt_sb)
+TRANS_FEAT(SQCVTU_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtu_sb)
+
+TRANS_FEAT(SQCVT_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvt_dh)
+TRANS_FEAT(UQCVT_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvt_dh)
+TRANS_FEAT(SQCVTU_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtu_dh)
+
+TRANS_FEAT(SQCVTN_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtn_sb)
+TRANS_FEAT(UQCVTN_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvtn_sb)
+TRANS_FEAT(SQCVTUN_sb, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtun_sb)
+
+TRANS_FEAT(SQCVTN_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtn_dh)
+TRANS_FEAT(UQCVTN_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uqcvtn_dh)
+TRANS_FEAT(SQCVTUN_dh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sqcvtun_dh)
+
+TRANS_FEAT(SUNPK_2bh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sunpk2_bh)
+TRANS_FEAT(SUNPK_2hs, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sunpk2_hs)
+TRANS_FEAT(SUNPK_2sd, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sunpk2_sd)
+
+TRANS_FEAT(SUNPK_4bh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sunpk4_bh)
+TRANS_FEAT(SUNPK_4hs, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sunpk4_hs)
+TRANS_FEAT(SUNPK_4sd, aa64_sme2, do_zz, a, 0, gen_helper_sme2_sunpk4_sd)
+
+TRANS_FEAT(UUNPK_2bh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk2_bh)
+TRANS_FEAT(UUNPK_2hs, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk2_hs)
+TRANS_FEAT(UUNPK_2sd, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk2_sd)
+
+TRANS_FEAT(UUNPK_4bh, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk4_bh)
+TRANS_FEAT(UUNPK_4hs, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk4_hs)
+TRANS_FEAT(UUNPK_4sd, aa64_sme2, do_zz, a, 0, gen_helper_sme2_uunpk4_sd)
+
+static bool do_zipuzp_4(DisasContext *s, arg_zz_e *a,
+ gen_helper_gvec_2 * const fn[5])
+{
+ int bytes_per_op = 4 << a->esz;
+
+ /* Both MO_64 and MO_128 can fail the size test. */
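+ /* e.g. MO_64 needs svl >= 32 bytes (256 bits), MO_128 needs 64. */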
+ if (s->max_svl < bytes_per_op) {
+ unallocated_encoding(s);
+ } else if (sme_sm_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ if (svl < bytes_per_op) {
+ unallocated_encoding(s);
+ } else {
+ tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->zd),
+ vec_full_reg_offset(s, a->zn),
+ svl, svl, 0, fn[a->esz]);
+ }
+ }
+ return true;
+}
+
+static gen_helper_gvec_2 * const zip4_fns[] = {
+ gen_helper_sme2_zip4_b,
+ gen_helper_sme2_zip4_h,
+ gen_helper_sme2_zip4_s,
+ gen_helper_sme2_zip4_d,
+ gen_helper_sme2_zip4_q,
+};
+TRANS_FEAT(ZIP_4, aa64_sme2, do_zipuzp_4, a, zip4_fns)
+
+static gen_helper_gvec_2 * const uzp4_fns[] = {
+ gen_helper_sme2_uzp4_b,
+ gen_helper_sme2_uzp4_h,
+ gen_helper_sme2_uzp4_s,
+ gen_helper_sme2_uzp4_d,
+ gen_helper_sme2_uzp4_q,
+};
+TRANS_FEAT(UZP_4, aa64_sme2, do_zipuzp_4, a, uzp4_fns)
+
+static bool do_zz_rshr(DisasContext *s, arg_rshr *a, gen_helper_gvec_2 *fn)
+{
+ if (sve_access_check(s)) {
+ int vl = vec_full_reg_size(s);
+ tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->zd),
+ vec_full_reg_offset(s, a->zn),
+ vl, vl, a->shift, fn);
+ }
+ return true;
+}
+
+TRANS_FEAT(SQRSHR_sh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshr_sh)
+TRANS_FEAT(UQRSHR_sh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshr_sh)
+TRANS_FEAT(SQRSHRU_sh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshru_sh)
+
+TRANS_FEAT(SQRSHR_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshr_sb)
+TRANS_FEAT(SQRSHR_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshr_dh)
+TRANS_FEAT(UQRSHR_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshr_sb)
+TRANS_FEAT(UQRSHR_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshr_dh)
+TRANS_FEAT(SQRSHRU_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshru_sb)
+TRANS_FEAT(SQRSHRU_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshru_dh)
+
+TRANS_FEAT(SQRSHRN_sh, aa64_sme2_or_sve2p1, do_zz_rshr, a, gen_helper_sme2_sqrshrn_sh)
+TRANS_FEAT(UQRSHRN_sh, aa64_sme2_or_sve2p1, do_zz_rshr, a, gen_helper_sme2_uqrshrn_sh)
+TRANS_FEAT(SQRSHRUN_sh, aa64_sme2_or_sve2p1, do_zz_rshr, a, gen_helper_sme2_sqrshrun_sh)
+
+TRANS_FEAT(SQRSHRN_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshrn_sb)
+TRANS_FEAT(SQRSHRN_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshrn_dh)
+TRANS_FEAT(UQRSHRN_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshrn_sb)
+TRANS_FEAT(UQRSHRN_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_uqrshrn_dh)
+TRANS_FEAT(SQRSHRUN_sb, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshrun_sb)
+TRANS_FEAT(SQRSHRUN_dh, aa64_sme2, do_zz_rshr, a, gen_helper_sme2_sqrshrun_dh)
+
+static bool do_zipuzp_2(DisasContext *s, arg_zzz_e *a,
+ gen_helper_gvec_3 * const fn[5])
+{
+ int bytes_per_op = 2 << a->esz;
+
+ /* MO_128 can fail the size test. */
+ if (s->max_svl < bytes_per_op) {
+ unallocated_encoding(s);
+ } else if (sme_sm_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ if (svl < bytes_per_op) {
+ unallocated_encoding(s);
+ } else {
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->zd),
+ vec_full_reg_offset(s, a->zn),
+ vec_full_reg_offset(s, a->zm),
+ svl, svl, 0, fn[a->esz]);
+ }
+ }
+ return true;
+}
+
+static gen_helper_gvec_3 * const zip2_fns[] = {
+ gen_helper_sme2_zip2_b,
+ gen_helper_sme2_zip2_h,
+ gen_helper_sme2_zip2_s,
+ gen_helper_sme2_zip2_d,
+ gen_helper_sme2_zip2_q,
+};
+TRANS_FEAT(ZIP_2, aa64_sme2, do_zipuzp_2, a, zip2_fns)
+
+static gen_helper_gvec_3 * const uzp2_fns[] = {
+ gen_helper_sme2_uzp2_b,
+ gen_helper_sme2_uzp2_h,
+ gen_helper_sme2_uzp2_s,
+ gen_helper_sme2_uzp2_d,
+ gen_helper_sme2_uzp2_q,
+};
+TRANS_FEAT(UZP_2, aa64_sme2, do_zipuzp_2, a, uzp2_fns)
+
+static bool trans_FCLAMP(DisasContext *s, arg_zzz_en *a)
+{
+ static gen_helper_gvec_3_ptr * const fn[] = {
+ gen_helper_sme2_bfclamp,
+ gen_helper_sme2_fclamp_h,
+ gen_helper_sme2_fclamp_s,
+ gen_helper_sme2_fclamp_d,
+ };
+ TCGv_ptr fpst;
+ int vl;
+
+ if (!dc_isar_feature(aa64_sme2, s)) {
+ return false;
+ }
+ /* This insn uses MO_8 to encode BFloat16. */
+ if (a->esz == MO_8 && !dc_isar_feature(aa64_sme_b16b16, s)) {
+ return false;
+ }
+ if (!sme_sm_enabled_check(s)) {
+ return true;
+ }
+
+ fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_A64_F16 : FPST_A64);
+ vl = vec_full_reg_size(s);
+
+ tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->zd),
+ vec_full_reg_offset(s, a->zn),
+ vec_full_reg_offset(s, a->zm),
+ fpst, vl, vl, a->n, fn[a->esz]);
+ return true;
+}
+
+static bool do_clamp(DisasContext *s, arg_zzz_en *a,
+ gen_helper_gvec_3 * const fn[4])
+{
+ int vl;
+
+ if (!dc_isar_feature(aa64_sme2, s)) {
+ return false;
+ }
+ if (!sme_sm_enabled_check(s)) {
+ return true;
+ }
+
+ /*
+ * Clamp is just a min+max, easily supported by most host
+ * vector operations -- we already have such an expansion in
+ * translate-sve.c for a single output.
+ * TODO: Add support in gvec for multiple simultaneous outputs,
+ * and/or copy to temporary upon overlap.
+ */
+ vl = vec_full_reg_size(s);
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->zd),
+ vec_full_reg_offset(s, a->zn),
+ vec_full_reg_offset(s, a->zm),
+ vl, vl, a->n, fn[a->esz]);
+ return true;
+}
+
+static gen_helper_gvec_3 * const sclamp_fns[] = {
+ gen_helper_sme2_sclamp_b,
+ gen_helper_sme2_sclamp_h,
+ gen_helper_sme2_sclamp_s,
+ gen_helper_sme2_sclamp_d,
+};
+TRANS(SCLAMP, do_clamp, a, sclamp_fns)
+
+static gen_helper_gvec_3 * const uclamp_fns[] = {
+ gen_helper_sme2_uclamp_b,
+ gen_helper_sme2_uclamp_h,
+ gen_helper_sme2_uclamp_s,
+ gen_helper_sme2_uclamp_d,
+};
+TRANS(UCLAMP, do_clamp, a, uclamp_fns)
+
+static bool trans_SEL(DisasContext *s, arg_SEL *a)
+{
+ typedef void sme_sel_fn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32);
+ static sme_sel_fn * const fns[4] = {
+ gen_helper_sme2_sel_b, gen_helper_sme2_sel_h,
+ gen_helper_sme2_sel_s, gen_helper_sme2_sel_d
+ };
+
+ if (!dc_isar_feature(aa64_sme2, s)) {
+ return false;
+ }
+ if (sme_sm_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ uint32_t desc = simd_desc(svl, svl, a->n);
+ TCGv_ptr t_d = tcg_temp_new_ptr();
+ TCGv_ptr t_n = tcg_temp_new_ptr();
+ TCGv_ptr t_m = tcg_temp_new_ptr();
+ TCGv_i32 png = tcg_temp_new_i32();
+
+ tcg_gen_addi_ptr(t_d, tcg_env, vec_full_reg_offset(s, a->zd));
+ tcg_gen_addi_ptr(t_n, tcg_env, vec_full_reg_offset(s, a->zn));
+ tcg_gen_addi_ptr(t_m, tcg_env, vec_full_reg_offset(s, a->zm));
+
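+ /*
+ * The predicate-as-counter value occupies the low 16 bits of the
+ * predicate register; XOR with 6 addresses those same bytes within
+ * the containing uint64_t on a big-endian host.
+ */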
+ tcg_gen_ld16u_i32(png, tcg_env, pred_full_reg_offset(s, a->pg)
+ ^ (HOST_BIG_ENDIAN ? 6 : 0));
+
+ fns[a->esz](t_d, t_n, t_m, png, tcg_constant_i32(desc));
+ }
+ return true;
+}
+
+static bool do_lut(DisasContext *s, arg_lut *a,
+ gen_helper_gvec_2_ptr *fn, bool strided)
+{
+ if (sme_sm_enabled_check(s) && sme2_zt0_enabled_check(s)) {
+ int svl = streaming_vec_reg_size(s);
+ tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, a->zd),
+ vec_full_reg_offset(s, a->zn),
+ tcg_env, svl, svl, strided | (a->idx << 1), fn);
+ }
+ return true;
+}
+
+TRANS_FEAT(LUTI2_c_1b, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_1b, false)
+TRANS_FEAT(LUTI2_c_1h, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_1h, false)
+TRANS_FEAT(LUTI2_c_1s, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_1s, false)
+
+TRANS_FEAT(LUTI2_c_2b, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_2b, false)
+TRANS_FEAT(LUTI2_c_2h, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_2h, false)
+TRANS_FEAT(LUTI2_c_2s, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_2s, false)
+
+TRANS_FEAT(LUTI2_c_4b, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_4b, false)
+TRANS_FEAT(LUTI2_c_4h, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_4h, false)
+TRANS_FEAT(LUTI2_c_4s, aa64_sme2, do_lut, a, gen_helper_sme2_luti2_4s, false)
+
+TRANS_FEAT(LUTI4_c_1b, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_1b, false)
+TRANS_FEAT(LUTI4_c_1h, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_1h, false)
+TRANS_FEAT(LUTI4_c_1s, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_1s, false)
+
+TRANS_FEAT(LUTI4_c_2b, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_2b, false)
+TRANS_FEAT(LUTI4_c_2h, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_2h, false)
+TRANS_FEAT(LUTI4_c_2s, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_2s, false)
+
+TRANS_FEAT(LUTI4_c_4h, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_4h, false)
+TRANS_FEAT(LUTI4_c_4s, aa64_sme2, do_lut, a, gen_helper_sme2_luti4_4s, false)
+
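+/*
+ * The strided forms encode only a subset of destination registers;
+ * the checks below reject Zd values whose stride bits are set, which
+ * correspond to unallocated encodings.
+ */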
+static bool do_lut_s4(DisasContext *s, arg_lut *a, gen_helper_gvec_2_ptr *fn)
+{
+ return !(a->zd & 0b01100) && do_lut(s, a, fn, true);
+}
+
+static bool do_lut_s8(DisasContext *s, arg_lut *a, gen_helper_gvec_2_ptr *fn)
+{
+ return !(a->zd & 0b01000) && do_lut(s, a, fn, true);
+}
+
+TRANS_FEAT(LUTI2_s_2b, aa64_sme2p1, do_lut_s8, a, gen_helper_sme2_luti2_2b)
+TRANS_FEAT(LUTI2_s_2h, aa64_sme2p1, do_lut_s8, a, gen_helper_sme2_luti2_2h)
+
+TRANS_FEAT(LUTI2_s_4b, aa64_sme2p1, do_lut_s4, a, gen_helper_sme2_luti2_4b)
+TRANS_FEAT(LUTI2_s_4h, aa64_sme2p1, do_lut_s4, a, gen_helper_sme2_luti2_4h)
+
+TRANS_FEAT(LUTI4_s_2b, aa64_sme2p1, do_lut_s8, a, gen_helper_sme2_luti4_2b)
+TRANS_FEAT(LUTI4_s_2h, aa64_sme2p1, do_lut_s8, a, gen_helper_sme2_luti4_2h)
+
+TRANS_FEAT(LUTI4_s_4h, aa64_sme2p1, do_lut_s4, a, gen_helper_sme2_luti4_4h)
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
index d23be47..7b57573 100644
--- a/target/arm/tcg/translate-sve.c
+++ b/target/arm/tcg/translate-sve.c
@@ -89,7 +89,7 @@ static inline int expand_imm_sh8u(DisasContext *s, int x)
*/
static inline int msz_dtype(DisasContext *s, int msz)
{
- static const uint8_t dtype[4] = { 0, 5, 10, 15 };
+ static const uint8_t dtype[5] = { 0, 5, 10, 15, 18 };
return dtype[msz];
}
@@ -629,7 +629,7 @@ static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
* = | ~(m | k)
*/
tcg_gen_and_i64(n, n, k);
- if (tcg_op_supported(INDEX_op_orc_i64, TCG_TYPE_I64, 0)) {
+ if (tcg_op_supported(INDEX_op_orc, TCG_TYPE_I64, 0)) {
tcg_gen_or_i64(m, m, k);
tcg_gen_orc_i64(d, n, m);
} else {
@@ -778,6 +778,9 @@ DO_ZPZ(NOT_zpz, aa64_sve, sve_not_zpz)
DO_ZPZ(ABS, aa64_sve, sve_abs)
DO_ZPZ(NEG, aa64_sve, sve_neg)
DO_ZPZ(RBIT, aa64_sve, sve_rbit)
+DO_ZPZ(ORQV, aa64_sme2p1_or_sve2p1, sve2p1_orqv)
+DO_ZPZ(EORQV, aa64_sme2p1_or_sve2p1, sve2p1_eorqv)
+DO_ZPZ(ANDQV, aa64_sme2p1_or_sve2p1, sve2p1_andqv)
static gen_helper_gvec_3 * const fabs_fns[4] = {
NULL, gen_helper_sve_fabs_h,
@@ -828,6 +831,41 @@ TRANS_FEAT(SXTW, aa64_sve, gen_gvec_ool_arg_zpz,
TRANS_FEAT(UXTW, aa64_sve, gen_gvec_ool_arg_zpz,
a->esz == 3 ? gen_helper_sve_uxtw_d : NULL, a, 0)
+static gen_helper_gvec_3 * const addqv_fns[4] = {
+ gen_helper_sve2p1_addqv_b, gen_helper_sve2p1_addqv_h,
+ gen_helper_sve2p1_addqv_s, gen_helper_sve2p1_addqv_d,
+};
+TRANS_FEAT(ADDQV, aa64_sme2p1_or_sve2p1,
+ gen_gvec_ool_arg_zpz, addqv_fns[a->esz], a, 0)
+
+static gen_helper_gvec_3 * const smaxqv_fns[4] = {
+ gen_helper_sve2p1_smaxqv_b, gen_helper_sve2p1_smaxqv_h,
+ gen_helper_sve2p1_smaxqv_s, gen_helper_sve2p1_smaxqv_d,
+};
+TRANS_FEAT(SMAXQV, aa64_sme2p1_or_sve2p1,
+ gen_gvec_ool_arg_zpz, smaxqv_fns[a->esz], a, 0)
+
+static gen_helper_gvec_3 * const sminqv_fns[4] = {
+ gen_helper_sve2p1_sminqv_b, gen_helper_sve2p1_sminqv_h,
+ gen_helper_sve2p1_sminqv_s, gen_helper_sve2p1_sminqv_d,
+};
+TRANS_FEAT(SMINQV, aa64_sme2p1_or_sve2p1,
+ gen_gvec_ool_arg_zpz, sminqv_fns[a->esz], a, 0)
+
+static gen_helper_gvec_3 * const umaxqv_fns[4] = {
+ gen_helper_sve2p1_umaxqv_b, gen_helper_sve2p1_umaxqv_h,
+ gen_helper_sve2p1_umaxqv_s, gen_helper_sve2p1_umaxqv_d,
+};
+TRANS_FEAT(UMAXQV, aa64_sme2p1_or_sve2p1,
+ gen_gvec_ool_arg_zpz, umaxqv_fns[a->esz], a, 0)
+
+static gen_helper_gvec_3 * const uminqv_fns[4] = {
+ gen_helper_sve2p1_uminqv_b, gen_helper_sve2p1_uminqv_h,
+ gen_helper_sve2p1_uminqv_s, gen_helper_sve2p1_uminqv_d,
+};
+TRANS_FEAT(UMINQV, aa64_sme2p1_or_sve2p1,
+ gen_gvec_ool_arg_zpz, uminqv_fns[a->esz], a, 0)
+
/*
*** SVE Integer Reduction Group
*/
@@ -1679,6 +1717,22 @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s)
+static bool trans_PTRUE_cnt(DisasContext *s, arg_PTRUE_cnt *a)
+{
+ if (!dc_isar_feature(aa64_sme2_or_sve2p1, s)) {
+ return false;
+ }
+ if (sve_access_check(s)) {
+ /* Canonical TRUE is encoded as a zero count with the invert bit set, plus the element size. */
+ int val = (1 << 15) | (1 << a->esz);
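+ /* e.g. esz == MO_8 gives val == 0x8001; esz == MO_64 gives 0x8008. */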
+
+ /* Write val to the first uint64_t; clear all of the rest. */
+ tcg_gen_gvec_dup_imm(MO_64, pred_full_reg_offset(s, a->rd),
+ 8, size_for_gvec(pred_full_reg_size(s)), val);
+ }
+ return true;
+}
+
/* Note pat == 31 is #all, to set all elements. */
TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve,
do_predset, 0, FFR_PRED_NUM, 31, false)
@@ -2148,6 +2202,55 @@ static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm)
TRANS_FEAT(EXT, aa64_sve, do_EXT, a->rd, a->rn, a->rm, a->imm)
TRANS_FEAT(EXT_sve2, aa64_sve2, do_EXT, a->rd, a->rn, (a->rn + 1) % 32, a->imm)
+static bool trans_EXTQ(DisasContext *s, arg_EXTQ *a)
+{
+ unsigned vl, dofs, sofs0, sofs1, sofs2, imm;
+
+ if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ imm = a->imm;
+ if (imm == 0) {
+ /* So far we never optimize Zdn with MOVPRFX, so zd = zn is a nop. */
+ return true;
+ }
+
+ vl = vec_full_reg_size(s);
+ dofs = vec_full_reg_offset(s, a->rd);
+ sofs2 = vec_full_reg_offset(s, a->rn);
+
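+ /*
+ * EXTQ is destructive: within each 16-byte segment the low source
+ * bytes come from Zd itself and the high source bytes from Zn.
+ */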
+ if (imm & 8) {
+ sofs0 = dofs + 8;
+ sofs1 = sofs2;
+ sofs2 += 8;
+ } else {
+ sofs0 = dofs;
+ sofs1 = dofs + 8;
+ }
+ imm = (imm & 7) << 3;
+
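+ /* imm is now a bit count (0, 8, ..., 56) for extract2 below. */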
+ for (unsigned i = 0; i < vl; i += 16) {
+ TCGv_i64 s0 = tcg_temp_new_i64();
+ TCGv_i64 s1 = tcg_temp_new_i64();
+ TCGv_i64 s2 = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(s0, tcg_env, sofs0 + i);
+ tcg_gen_ld_i64(s1, tcg_env, sofs1 + i);
+ tcg_gen_ld_i64(s2, tcg_env, sofs2 + i);
+
+ tcg_gen_extract2_i64(s0, s0, s1, imm);
+ tcg_gen_extract2_i64(s1, s1, s2, imm);
+
+ tcg_gen_st_i64(s0, tcg_env, dofs + i);
+ tcg_gen_st_i64(s1, tcg_env, dofs + i + 8);
+ }
+ return true;
+}
+
/*
*** SVE Permute - Unpredicated Group
*/
@@ -2195,6 +2298,27 @@ static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a)
return true;
}
+static bool trans_DUPQ(DisasContext *s, arg_DUPQ *a)
+{
+ unsigned vl, dofs, nofs;
+
+ if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ vl = vec_full_reg_size(s);
+ dofs = vec_full_reg_offset(s, a->rd);
+ nofs = vec_reg_offset(s, a->rn, a->imm, a->esz);
+
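+ /* Broadcast the selected element within each 16-byte segment. */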
+ for (unsigned i = 0; i < vl; i += 16) {
+ tcg_gen_gvec_dup_mem(a->esz, dofs + i, nofs + i, 16, 16);
+ }
+ return true;
+}
+
static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val)
{
typedef void gen_insr(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
@@ -2256,12 +2380,124 @@ static gen_helper_gvec_4 * const sve2_tbl_fns[4] = {
TRANS_FEAT(TBL_sve2, aa64_sve2, gen_gvec_ool_zzzz, sve2_tbl_fns[a->esz],
a->rd, a->rn, (a->rn + 1) % 32, a->rm, 0)
+static gen_helper_gvec_3 * const tblq_fns[4] = {
+ gen_helper_sve2p1_tblq_b, gen_helper_sve2p1_tblq_h,
+ gen_helper_sve2p1_tblq_s, gen_helper_sve2p1_tblq_d
+};
+TRANS_FEAT(TBLQ, aa64_sme2p1_or_sve2p1, gen_gvec_ool_arg_zzz,
+ tblq_fns[a->esz], a, 0)
+
static gen_helper_gvec_3 * const tbx_fns[4] = {
gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h,
gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d
};
TRANS_FEAT(TBX, aa64_sve2, gen_gvec_ool_arg_zzz, tbx_fns[a->esz], a, 0)
+static gen_helper_gvec_3 * const tbxq_fns[4] = {
+ gen_helper_sve2p1_tbxq_b, gen_helper_sve2p1_tbxq_h,
+ gen_helper_sve2p1_tbxq_s, gen_helper_sve2p1_tbxq_d
+};
+TRANS_FEAT(TBXQ, aa64_sme2p1_or_sve2p1, gen_gvec_ool_arg_zzz,
+ tbxq_fns[a->esz], a, 0)
+
+static bool trans_PMOV_pv(DisasContext *s, arg_PMOV_pv *a)
+{
+ static gen_helper_gvec_2 * const fns[4] = {
+ NULL, gen_helper_pmov_pv_h,
+ gen_helper_pmov_pv_s, gen_helper_pmov_pv_d
+ };
+ unsigned vl, pl, vofs, pofs;
+ TCGv_i64 tmp;
+
+ if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ vl = vec_full_reg_size(s);
+ if (a->esz != MO_8) {
+ tcg_gen_gvec_2_ool(pred_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vl, vl, a->imm, fns[a->esz]);
+ return true;
+ }
+
+ /*
+ * Copy the low PL bytes from vector Zn, zero-extending to a
+ * multiple of 8 bytes, so that Pd is properly cleared.
+ */
+
+ pl = vl / 8;
+ pofs = pred_full_reg_offset(s, a->rd);
+ vofs = vec_full_reg_offset(s, a->rn);
+
+ QEMU_BUILD_BUG_ON(sizeof(ARMPredicateReg) != 32);
+ for (unsigned i = 32; i >= 8; i >>= 1) {
+ if (pl & i) {
+ tcg_gen_gvec_mov(MO_64, pofs, vofs, i, i);
+ pofs += i;
+ vofs += i;
+ }
+ }
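+ /*
+ * Vector lengths are multiples of 16 bytes, so pl == vl / 8 is even
+ * and only 0, 2, 4 or 6 bytes can remain to be copied here.
+ */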
+ switch (pl & 7) {
+ case 0:
+ return true;
+ case 2:
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ld16u_i64(tmp, tcg_env, vofs + (HOST_BIG_ENDIAN ? 6 : 0));
+ break;
+ case 4:
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ld32u_i64(tmp, tcg_env, vofs + (HOST_BIG_ENDIAN ? 4 : 0));
+ break;
+ case 6:
+ tmp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp, tcg_env, vofs);
+ tcg_gen_extract_i64(tmp, tmp, 0, 48);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_gen_st_i64(tmp, tcg_env, pofs);
+ return true;
+}
+
+static bool trans_PMOV_vp(DisasContext *s, arg_PMOV_pv *a)
+{
+ static gen_helper_gvec_2 * const fns[4] = {
+ NULL, gen_helper_pmov_vp_h,
+ gen_helper_pmov_vp_s, gen_helper_pmov_vp_d
+ };
+ unsigned vl;
+
+ if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
+ return false;
+ }
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ vl = vec_full_reg_size(s);
+
+ if (a->esz == MO_8) {
+ /*
+ * The low PL bytes are copied from Pn to Zd unchanged.
+ * We know that the unused portion of Pn is zero, and
+ * that imm == 0, so the balance of Zd must be zeroed.
+ */
+ tcg_gen_gvec_mov(MO_64, vec_full_reg_offset(s, a->rd),
+ pred_full_reg_offset(s, a->rn),
+ size_for_gvec(vl / 8), vl);
+ } else {
+ tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd),
+ pred_full_reg_offset(s, a->rn),
+ vl, vl, a->imm, fns[a->esz]);
+ }
+ return true;
+}
+
static bool trans_UNPK(DisasContext *s, arg_UNPK *a)
{
static gen_helper_gvec_2 * const fns[4][2] = {
@@ -2352,6 +2588,23 @@ TRANS_FEAT(PUNPKHI, aa64_sve, do_perm_pred2, a, 1, gen_helper_sve_punpk_p)
*** SVE Permute - Interleaving Group
*/
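+/*
+ * The quadword interleaves operate on 16-byte units, so they require
+ * a vector length of at least 32 bytes; shorter vectors take the
+ * unallocated-encoding path.
+ */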
+static bool do_interleave_q(DisasContext *s, gen_helper_gvec_3 *fn,
+ arg_rrr_esz *a, int data)
+{
+ if (sve_access_check(s)) {
+ unsigned vsz = vec_full_reg_size(s);
+ if (vsz < 32) {
+ unallocated_encoding(s);
+ } else {
+ tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
+ vec_full_reg_offset(s, a->rn),
+ vec_full_reg_offset(s, a->rm),
+ vsz, vsz, data, fn);
+ }
+ }
+ return true;
+}
+
static gen_helper_gvec_3 * const zip_fns[4] = {
gen_helper_sve_zip_b, gen_helper_sve_zip_h,
gen_helper_sve_zip_s, gen_helper_sve_zip_d,
@@ -2361,26 +2614,43 @@ TRANS_FEAT(ZIP1_z, aa64_sve, gen_gvec_ool_arg_zzz,
TRANS_FEAT(ZIP2_z, aa64_sve, gen_gvec_ool_arg_zzz,
zip_fns[a->esz], a, vec_full_reg_size(s) / 2)
-TRANS_FEAT(ZIP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
- gen_helper_sve2_zip_q, a, 0)
-TRANS_FEAT(ZIP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
- gen_helper_sve2_zip_q, a,
- QEMU_ALIGN_DOWN(vec_full_reg_size(s), 32) / 2)
+TRANS_FEAT_NONSTREAMING(ZIP1_q, aa64_sve_f64mm, do_interleave_q,
+ gen_helper_sve2_zip_q, a, 0)
+TRANS_FEAT_NONSTREAMING(ZIP2_q, aa64_sve_f64mm, do_interleave_q,
+ gen_helper_sve2_zip_q, a,
+ QEMU_ALIGN_DOWN(vec_full_reg_size(s), 32) / 2)
+
+static gen_helper_gvec_3 * const zipq_fns[4] = {
+ gen_helper_sve2p1_zipq_b, gen_helper_sve2p1_zipq_h,
+ gen_helper_sve2p1_zipq_s, gen_helper_sve2p1_zipq_d,
+};
+TRANS_FEAT(ZIPQ1, aa64_sme2p1_or_sve2p1, gen_gvec_ool_arg_zzz,
+ zipq_fns[a->esz], a, 0)
+TRANS_FEAT(ZIPQ2, aa64_sme2p1_or_sve2p1, gen_gvec_ool_arg_zzz,
+ zipq_fns[a->esz], a, 16 / 2)
static gen_helper_gvec_3 * const uzp_fns[4] = {
gen_helper_sve_uzp_b, gen_helper_sve_uzp_h,
gen_helper_sve_uzp_s, gen_helper_sve_uzp_d,
};
-
TRANS_FEAT(UZP1_z, aa64_sve, gen_gvec_ool_arg_zzz,
uzp_fns[a->esz], a, 0)
TRANS_FEAT(UZP2_z, aa64_sve, gen_gvec_ool_arg_zzz,
uzp_fns[a->esz], a, 1 << a->esz)
-TRANS_FEAT(UZP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
- gen_helper_sve2_uzp_q, a, 0)
-TRANS_FEAT(UZP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
- gen_helper_sve2_uzp_q, a, 16)
+TRANS_FEAT_NONSTREAMING(UZP1_q, aa64_sve_f64mm, do_interleave_q,
+ gen_helper_sve2_uzp_q, a, 0)
+TRANS_FEAT_NONSTREAMING(UZP2_q, aa64_sve_f64mm, do_interleave_q,
+ gen_helper_sve2_uzp_q, a, 16)
+
+static gen_helper_gvec_3 * const uzpq_fns[4] = {
+ gen_helper_sve2p1_uzpq_b, gen_helper_sve2p1_uzpq_h,
+ gen_helper_sve2p1_uzpq_s, gen_helper_sve2p1_uzpq_d,
+};
+TRANS_FEAT(UZPQ1, aa64_sme2p1_or_sve2p1, gen_gvec_ool_arg_zzz,
+ uzpq_fns[a->esz], a, 0)
+TRANS_FEAT(UZPQ2, aa64_sme2p1_or_sve2p1, gen_gvec_ool_arg_zzz,
+ uzpq_fns[a->esz], a, 1 << a->esz)
static gen_helper_gvec_3 * const trn_fns[4] = {
gen_helper_sve_trn_b, gen_helper_sve_trn_h,
@@ -2392,10 +2662,10 @@ TRANS_FEAT(TRN1_z, aa64_sve, gen_gvec_ool_arg_zzz,
TRANS_FEAT(TRN2_z, aa64_sve, gen_gvec_ool_arg_zzz,
trn_fns[a->esz], a, 1 << a->esz)
-TRANS_FEAT(TRN1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
- gen_helper_sve2_trn_q, a, 0)
-TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
- gen_helper_sve2_trn_q, a, 16)
+TRANS_FEAT_NONSTREAMING(TRN1_q, aa64_sve_f64mm, do_interleave_q,
+ gen_helper_sve2_trn_q, a, 0)
+TRANS_FEAT_NONSTREAMING(TRN2_q, aa64_sve_f64mm, do_interleave_q,
+ gen_helper_sve2_trn_q, a, 16)
/*
*** SVE Permute Vector - Predicated Group
@@ -2981,6 +3251,36 @@ static bool trans_CNTP(DisasContext *s, arg_CNTP *a)
return true;
}
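+/*
+ * CNTP with a predicate-as-counter source: SVE2p1 permits this in
+ * non-streaming mode, while SME2 alone restricts it to streaming
+ * mode, hence the two-way access check.
+ */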
+static bool trans_CNTP_c(DisasContext *s, arg_CNTP_c *a)
+{
+ TCGv_i32 t_png;
+ uint32_t desc = 0;
+
+ if (dc_isar_feature(aa64_sve2p1, s)) {
+ if (!sve_access_check(s)) {
+ return true;
+ }
+ } else if (dc_isar_feature(aa64_sme2, s)) {
+ if (!sme_sm_enabled_check(s)) {
+ return true;
+ }
+ } else {
+ return false;
+ }
+
+ t_png = tcg_temp_new_i32();
+ tcg_gen_ld16u_i32(t_png, tcg_env,
+ pred_full_reg_offset(s, a->rn) ^
+ (HOST_BIG_ENDIAN ? 6 : 0));
+
+ desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
+ desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
+ desc = FIELD_DP32(desc, PREDDESC, DATA, a->vl);
+
+ gen_helper_sve2p1_cntp_c(cpu_reg(s, a->rd), t_png, tcg_constant_i32(desc));
+ return true;
+}
+
static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a)
{
if (!dc_isar_feature(aa64_sve, s)) {
@@ -3091,7 +3391,9 @@ static bool trans_CTERM(DisasContext *s, arg_CTERM *a)
return true;
}
-static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
+typedef void gen_while_fn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
+static bool do_WHILE(DisasContext *s, arg_while *a,
+ bool lt, int scale, int data, gen_while_fn *fn)
{
TCGv_i64 op0, op1, t0, t1, tmax;
TCGv_i32 t2;
@@ -3101,14 +3403,8 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
TCGCond cond;
uint64_t maxval;
/* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */
- bool eq = a->eq == a->lt;
+ bool eq = a->eq == lt;
- /* The greater-than conditions are all SVE2. */
- if (a->lt
- ? !dc_isar_feature(aa64_sve, s)
- : !dc_isar_feature(aa64_sve2, s)) {
- return false;
- }
if (!sve_access_check(s)) {
return true;
}
@@ -3132,7 +3428,7 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
- if (a->lt) {
+ if (lt) {
tcg_gen_sub_i64(t0, op1, op0);
if (a->u) {
maxval = a->sf ? UINT64_MAX : UINT32_MAX;
@@ -3152,7 +3448,7 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
}
}
- tmax = tcg_constant_i64(vsz >> a->esz);
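+ /* The pair and counted variants cover 2x or 4x elements (scale = 1 or 2). */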
+ tmax = tcg_constant_i64((vsz << scale) >> a->esz);
if (eq) {
/* Equality means one more iteration. */
tcg_gen_addi_i64(t0, t0, 1);
@@ -3181,24 +3477,38 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
t2 = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(t2, t0);
- /* Scale elements to bits. */
- tcg_gen_shli_i32(t2, t2, a->esz);
-
desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
+ desc = FIELD_DP32(desc, PREDDESC, DATA, data);
ptr = tcg_temp_new_ptr();
tcg_gen_addi_ptr(ptr, tcg_env, pred_full_reg_offset(s, a->rd));
- if (a->lt) {
- gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc));
- } else {
- gen_helper_sve_whileg(t2, ptr, t2, tcg_constant_i32(desc));
- }
+ fn(t2, ptr, t2, tcg_constant_i32(desc));
+
do_pred_flags(t2);
return true;
}
+TRANS_FEAT(WHILE_lt, aa64_sve, do_WHILE,
+ a, true, 0, 0, gen_helper_sve_whilel)
+TRANS_FEAT(WHILE_gt, aa64_sve2, do_WHILE,
+ a, false, 0, 0, gen_helper_sve_whileg)
+
+TRANS_FEAT(WHILE_lt_pair, aa64_sme2_or_sve2p1, do_WHILE,
+ a, true, 1, 0, gen_helper_sve_while2l)
+TRANS_FEAT(WHILE_gt_pair, aa64_sme2_or_sve2p1, do_WHILE,
+ a, false, 1, 0, gen_helper_sve_while2g)
+
+TRANS_FEAT(WHILE_lt_cnt2, aa64_sme2_or_sve2p1, do_WHILE,
+ a, true, 1, 1, gen_helper_sve_whilecl)
+TRANS_FEAT(WHILE_lt_cnt4, aa64_sme2_or_sve2p1, do_WHILE,
+ a, true, 2, 2, gen_helper_sve_whilecl)
+TRANS_FEAT(WHILE_gt_cnt2, aa64_sme2_or_sve2p1, do_WHILE,
+ a, false, 1, 1, gen_helper_sve_whilecg)
+TRANS_FEAT(WHILE_gt_cnt4, aa64_sme2_or_sve2p1, do_WHILE,
+ a, false, 2, 2, gen_helper_sve_whilecg)
+
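The scale argument threaded through do_WHILE above is easiest to see in a scalar model: scale = 1 doubles the element capacity for the predicate-pair forms. A sketch for the unsigned less-than case, omitting the maxval clamping and signed variants the real code handles:

#include <stdint.h>
#include <stdbool.h>

/* Element count do_WHILE materializes: the register distance, plus one
 * for the -equal forms, clamped to the vector's (scaled) capacity. */
static uint64_t while_lt_count(uint64_t op0, uint64_t op1, bool eq,
                               unsigned vsz_bytes, unsigned scale,
                               unsigned esz)
{
    uint64_t tmax = ((uint64_t)vsz_bytes << scale) >> esz;
    uint64_t n = op1 - op0;       /* iterations while op0 < op1 */
    if (eq) {
        n += 1;                   /* equality means one more iteration */
    }
    return n < tmax ? n : tmax;
}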
static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
{
TCGv_i64 op0, op1, diff, t1, tmax;
@@ -3217,7 +3527,7 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
op0 = read_cpu_reg(s, a->rn, 1);
op1 = read_cpu_reg(s, a->rm, 1);
- tmax = tcg_constant_i64(vsz);
+ tmax = tcg_constant_i64(vsz >> a->esz);
diff = tcg_temp_new_i64();
if (a->rw) {
@@ -3227,15 +3537,15 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
tcg_gen_sub_i64(diff, op0, op1);
tcg_gen_sub_i64(t1, op1, op0);
tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1);
- /* Round down to a multiple of ESIZE. */
- tcg_gen_andi_i64(diff, diff, -1 << a->esz);
+ /* Divide, rounding down, by ESIZE. */
+ tcg_gen_shri_i64(diff, diff, a->esz);
/* If op1 == op0, diff == 0, and the condition is always true. */
tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff);
} else {
/* WHILEWR */
tcg_gen_sub_i64(diff, op1, op0);
- /* Round down to a multiple of ESIZE. */
- tcg_gen_andi_i64(diff, diff, -1 << a->esz);
+ /* Divide, rounding down, by ESIZE. */
+ tcg_gen_shri_i64(diff, diff, a->esz);
/* If op0 >= op1, diff <= 0, the condition is always true. */
tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff);
}
@@ -3258,6 +3568,42 @@ static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
return true;
}
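The WHILEWR/WHILERW change above switches both sides of the comparison from bytes to elements: tmax is now vsz >> esz, and diff is divided by the element size instead of rounded down to a multiple of it. A worked example of why the two forms describe the same thing, as a sketch:

#include <stdint.h>

static void whilewr_units_example(void)
{
    unsigned esz = 2;                                 /* MO_32: 4-byte elements */
    uint64_t diff_bytes = 13;
    uint64_t old_form = diff_bytes & -(1ull << esz);  /* 12: bytes, rounded down */
    uint64_t new_form = diff_bytes >> esz;            /*  3: whole elements */
    /* Both encode the same three free elements; the new form matches
     * tmax, which is now in elements rather than bytes. */
    (void)old_form; (void)new_form;
}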
+static bool do_pext(DisasContext *s, arg_pext *a, int n)
+{
+ TCGv_i32 t_png;
+ TCGv_ptr t_pd;
+ int pl;
+
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ t_png = tcg_temp_new_i32();
+ tcg_gen_ld16u_i32(t_png, tcg_env,
+ pred_full_reg_offset(s, a->rn) ^
+ (HOST_BIG_ENDIAN ? 6 : 0));
+
+ t_pd = tcg_temp_new_ptr();
+ pl = pred_full_reg_size(s);
+
+ for (int i = 0; i < n; ++i) {
+ int rd = (a->rd + i) % 16;
+ int part = a->imm * n + i;
+ unsigned desc = 0;
+
+ desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pl);
+ desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
+ desc = FIELD_DP32(desc, PREDDESC, DATA, part);
+
+ tcg_gen_addi_ptr(t_pd, tcg_env, pred_full_reg_offset(s, rd));
+ gen_helper_pext(t_pd, t_png, tcg_constant_i32(desc));
+ }
+ return true;
+}
+
+TRANS_FEAT(PEXT_1, aa64_sme2_or_sve2p1, do_pext, a, 1)
+TRANS_FEAT(PEXT_2, aa64_sme2_or_sve2p1, do_pext, a, 2)
+
/*
*** SVE Integer Wide Immediate - Unpredicated Group
*/
@@ -3385,8 +3731,8 @@ DO_ZZI(UMIN, umin)
#undef DO_ZZI
static gen_helper_gvec_4 * const dot_fns[2][2] = {
- { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h },
- { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h }
+ { gen_helper_gvec_sdot_4b, gen_helper_gvec_sdot_4h },
+ { gen_helper_gvec_udot_4b, gen_helper_gvec_udot_4h }
};
TRANS_FEAT(DOT_zzzz, aa64_sve, gen_gvec_ool_zzzz,
dot_fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0)
@@ -3395,19 +3741,24 @@ TRANS_FEAT(DOT_zzzz, aa64_sve, gen_gvec_ool_zzzz,
* SVE Multiply - Indexed
*/
-TRANS_FEAT(SDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz,
- gen_helper_gvec_sdot_idx_b, a)
-TRANS_FEAT(SDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz,
- gen_helper_gvec_sdot_idx_h, a)
-TRANS_FEAT(UDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz,
- gen_helper_gvec_udot_idx_b, a)
-TRANS_FEAT(UDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz,
- gen_helper_gvec_udot_idx_h, a)
-
-TRANS_FEAT(SUDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz,
- gen_helper_gvec_sudot_idx_b, a)
-TRANS_FEAT(USDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz,
- gen_helper_gvec_usdot_idx_b, a)
+TRANS_FEAT(SDOT_zzxw_4s, aa64_sve, gen_gvec_ool_arg_zzxz,
+ gen_helper_gvec_sdot_idx_4b, a)
+TRANS_FEAT(SDOT_zzxw_4d, aa64_sve, gen_gvec_ool_arg_zzxz,
+ gen_helper_gvec_sdot_idx_4h, a)
+TRANS_FEAT(UDOT_zzxw_4s, aa64_sve, gen_gvec_ool_arg_zzxz,
+ gen_helper_gvec_udot_idx_4b, a)
+TRANS_FEAT(UDOT_zzxw_4d, aa64_sve, gen_gvec_ool_arg_zzxz,
+ gen_helper_gvec_udot_idx_4h, a)
+
+TRANS_FEAT(SUDOT_zzxw_4s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz,
+ gen_helper_gvec_sudot_idx_4b, a)
+TRANS_FEAT(USDOT_zzxw_4s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz,
+ gen_helper_gvec_usdot_idx_4b, a)
+
+TRANS_FEAT(SDOT_zzxw_2s, aa64_sme2_or_sve2p1, gen_gvec_ool_arg_zzxz,
+ gen_helper_gvec_sdot_idx_2h, a)
+TRANS_FEAT(UDOT_zzxw_2s, aa64_sme2_or_sve2p1, gen_gvec_ool_arg_zzxz,
+ gen_helper_gvec_udot_idx_2h, a)
#define DO_SVE2_RRX(NAME, FUNC) \
TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \
@@ -3621,6 +3972,54 @@ DO_VPZ_AH(FMAXV, fmaxv)
#undef DO_VPZ
+static gen_helper_gvec_3_ptr * const faddqv_fns[4] = {
+ NULL, gen_helper_sve2p1_faddqv_h,
+ gen_helper_sve2p1_faddqv_s, gen_helper_sve2p1_faddqv_d,
+};
+TRANS_FEAT(FADDQV, aa64_sme2p1_or_sve2p1, gen_gvec_fpst_arg_zpz,
+ faddqv_fns[a->esz], a, 0,
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
+
+static gen_helper_gvec_3_ptr * const fmaxnmqv_fns[4] = {
+ NULL, gen_helper_sve2p1_fmaxnmqv_h,
+ gen_helper_sve2p1_fmaxnmqv_s, gen_helper_sve2p1_fmaxnmqv_d,
+};
+TRANS_FEAT(FMAXNMQV, aa64_sme2p1_or_sve2p1, gen_gvec_fpst_arg_zpz,
+ fmaxnmqv_fns[a->esz], a, 0,
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
+
+static gen_helper_gvec_3_ptr * const fminnmqv_fns[4] = {
+ NULL, gen_helper_sve2p1_fminnmqv_h,
+ gen_helper_sve2p1_fminnmqv_s, gen_helper_sve2p1_fminnmqv_d,
+};
+TRANS_FEAT(FMINNMQV, aa64_sme2p1_or_sve2p1, gen_gvec_fpst_arg_zpz,
+ fminnmqv_fns[a->esz], a, 0,
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
+
+static gen_helper_gvec_3_ptr * const fmaxqv_fns[4] = {
+ NULL, gen_helper_sve2p1_fmaxqv_h,
+ gen_helper_sve2p1_fmaxqv_s, gen_helper_sve2p1_fmaxqv_d,
+};
+static gen_helper_gvec_3_ptr * const fmaxqv_ah_fns[4] = {
+ NULL, gen_helper_sve2p1_ah_fmaxqv_h,
+ gen_helper_sve2p1_ah_fmaxqv_s, gen_helper_sve2p1_ah_fmaxqv_d,
+};
+TRANS_FEAT(FMAXQV, aa64_sme2p1_or_sve2p1, gen_gvec_fpst_arg_zpz,
+ (s->fpcr_ah ? fmaxqv_ah_fns : fmaxqv_fns)[a->esz], a, 0,

+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
+
+static gen_helper_gvec_3_ptr * const fminqv_fns[4] = {
+ NULL, gen_helper_sve2p1_fminqv_h,
+ gen_helper_sve2p1_fminqv_s, gen_helper_sve2p1_fminqv_d,
+};
+static gen_helper_gvec_3_ptr * const fminqv_ah_fns[4] = {
+ NULL, gen_helper_sve2p1_ah_fminqv_h,
+ gen_helper_sve2p1_ah_fminqv_s, gen_helper_sve2p1_ah_fminqv_d,
+};
+TRANS_FEAT(FMINQV, aa64_sme2p1_or_sve2p1, gen_gvec_fpst_arg_zpz,
+ (s->fpcr_ah ? fminqv_ah_fns : fminqv_fns)[a->esz], a, 0,
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64)
+
/*
*** SVE Floating Point Unary Operations - Unpredicated Group
*/
@@ -4143,7 +4542,7 @@ TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
*/
void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
- int len, int rn, int imm)
+ int len, int rn, int imm, MemOp align)
{
int len_align = QEMU_ALIGN_DOWN(len, 16);
int len_remain = len % 16;
@@ -4172,12 +4571,15 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
for (i = 0; i < len_align; i += 16) {
tcg_gen_qemu_ld_i128(t16, clean_addr, midx,
- MO_LE | MO_128 | MO_ATOM_NONE);
+ MO_LE | MO_128 | MO_ATOM_NONE | align);
tcg_gen_extr_i128_i64(t0, t1, t16);
tcg_gen_st_i64(t0, base, vofs + i);
tcg_gen_st_i64(t1, base, vofs + i + 8);
tcg_gen_addi_i64(clean_addr, clean_addr, 16);
}
+ if (len_align) {
+ align = MO_UNALN;
+ }
} else {
TCGLabel *loop = gen_new_label();
TCGv_ptr tp, i = tcg_temp_new_ptr();
@@ -4187,7 +4589,7 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
t16 = tcg_temp_new_i128();
tcg_gen_qemu_ld_i128(t16, clean_addr, midx,
- MO_LE | MO_128 | MO_ATOM_NONE);
+ MO_LE | MO_128 | MO_ATOM_NONE | align);
tcg_gen_addi_i64(clean_addr, clean_addr, 16);
tp = tcg_temp_new_ptr();
@@ -4202,6 +4604,7 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
tcg_gen_st_i64(t1, tp, vofs + 8);
tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
+ align = MO_UNALN;
}
/*
@@ -4210,7 +4613,9 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
*/
if (len_remain >= 8) {
t0 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ | MO_ATOM_NONE);
+ tcg_gen_qemu_ld_i64(t0, clean_addr, midx,
+ MO_LEUQ | MO_ATOM_NONE | align);
+ align = MO_UNALN;
tcg_gen_st_i64(t0, base, vofs + len_align);
len_remain -= 8;
len_align += 8;
@@ -4225,12 +4630,14 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
case 4:
case 8:
tcg_gen_qemu_ld_i64(t0, clean_addr, midx,
- MO_LE | ctz32(len_remain) | MO_ATOM_NONE);
+ MO_LE | ctz32(len_remain)
+ | MO_ATOM_NONE | align);
break;
case 6:
t1 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL | MO_ATOM_NONE);
+ tcg_gen_qemu_ld_i64(t0, clean_addr, midx,
+ MO_LEUL | MO_ATOM_NONE | align);
tcg_gen_addi_i64(clean_addr, clean_addr, 4);
tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW | MO_ATOM_NONE);
tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
@@ -4245,7 +4652,7 @@ void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
/* Similarly for stores. */
void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
- int len, int rn, int imm)
+ int len, int rn, int imm, MemOp align)
{
int len_align = QEMU_ALIGN_DOWN(len, 16);
int len_remain = len % 16;
@@ -4277,9 +4684,12 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
tcg_gen_ld_i64(t1, base, vofs + i + 8);
tcg_gen_concat_i64_i128(t16, t0, t1);
tcg_gen_qemu_st_i128(t16, clean_addr, midx,
- MO_LE | MO_128 | MO_ATOM_NONE);
+ MO_LE | MO_128 | MO_ATOM_NONE | align);
tcg_gen_addi_i64(clean_addr, clean_addr, 16);
}
+ if (len_align) {
+ align = MO_UNALN;
+ }
} else {
TCGLabel *loop = gen_new_label();
TCGv_ptr tp, i = tcg_temp_new_ptr();
@@ -4303,13 +4713,16 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
tcg_gen_addi_i64(clean_addr, clean_addr, 16);
tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
+ align = MO_UNALN;
}
/* Predicate register stores can be any multiple of 2. */
if (len_remain >= 8) {
t0 = tcg_temp_new_i64();
tcg_gen_ld_i64(t0, base, vofs + len_align);
- tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ | MO_ATOM_NONE);
+ tcg_gen_qemu_st_i64(t0, clean_addr, midx,
+ MO_LEUQ | MO_ATOM_NONE | align);
+ align = MO_UNALN;
len_remain -= 8;
len_align += 8;
if (len_remain) {
@@ -4325,11 +4738,13 @@ void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
case 4:
case 8:
tcg_gen_qemu_st_i64(t0, clean_addr, midx,
- MO_LE | ctz32(len_remain) | MO_ATOM_NONE);
+ MO_LE | ctz32(len_remain)
+ | MO_ATOM_NONE | align);
break;
case 6:
- tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL | MO_ATOM_NONE);
+ tcg_gen_qemu_st_i64(t0, clean_addr, midx,
+ MO_LEUL | MO_ATOM_NONE | align);
tcg_gen_addi_i64(clean_addr, clean_addr, 4);
tcg_gen_shri_i64(t0, t0, 32);
tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW | MO_ATOM_NONE);
@@ -4349,7 +4764,8 @@ static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
if (sve_access_check(s)) {
int size = vec_full_reg_size(s);
int off = vec_full_reg_offset(s, a->rd);
- gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size);
+ gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size,
+ s->align_mem ? MO_ALIGN_16 : MO_UNALN);
}
return true;
}
@@ -4362,7 +4778,8 @@ static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
if (sve_access_check(s)) {
int size = pred_full_reg_size(s);
int off = pred_full_reg_offset(s, a->rd);
- gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size);
+ gen_sve_ldr(s, tcg_env, off, size, a->rn, a->imm * size,
+ s->align_mem ? MO_ALIGN_2 : MO_UNALN);
}
return true;
}
@@ -4375,7 +4792,8 @@ static bool trans_STR_zri(DisasContext *s, arg_rri *a)
if (sve_access_check(s)) {
int size = vec_full_reg_size(s);
int off = vec_full_reg_offset(s, a->rd);
- gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size);
+ gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size,
+ s->align_mem ? MO_ALIGN_16 : MO_UNALN);
}
return true;
}
@@ -4388,7 +4806,8 @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a)
if (sve_access_check(s)) {
int size = pred_full_reg_size(s);
int off = pred_full_reg_offset(s, a->rd);
- gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size);
+ gen_sve_str(s, tcg_env, off, size, a->rn, a->imm * size,
+ s->align_mem ? MO_ALIGN_2 : MO_UNALN);
}
return true;
}
@@ -4398,21 +4817,25 @@ static bool trans_STR_pri(DisasContext *s, arg_rri *a)
*/
/* The memory mode of the dtype. */
-static const MemOp dtype_mop[16] = {
+static const MemOp dtype_mop[19] = {
MO_UB, MO_UB, MO_UB, MO_UB,
MO_SL, MO_UW, MO_UW, MO_UW,
MO_SW, MO_SW, MO_UL, MO_UL,
- MO_SB, MO_SB, MO_SB, MO_UQ
+ MO_SB, MO_SB, MO_SB, MO_UQ,
+ /* Artificial values used by decode */
+ MO_UL, MO_UQ, MO_128,
};
#define dtype_msz(x) (dtype_mop[x] & MO_SIZE)
/* The vector element size of dtype. */
-static const uint8_t dtype_esz[16] = {
+static const uint8_t dtype_esz[19] = {
0, 1, 2, 3,
3, 1, 2, 3,
3, 2, 2, 3,
- 3, 2, 1, 3
+ 3, 2, 1, 3,
+ /* Artificial values used by decode */
+ 4, 4, 4,
};
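How the three artificial dtypes resolve can be read off the two tables just above together with the new ldr_fns rows added below; a sketch:

/* dtype 16: 4-byte memory op into 128-bit elements  -> sve_ld1squ_*
 * dtype 17: 8-byte memory op into 128-bit elements  -> sve_ld1dqu_*
 * dtype 18: 16-byte memory op, 128-bit elements     -> sve_ld2qq_* etc.
 */
static void dtype17_example(void)
{
    unsigned msz = dtype_mop[17] & MO_SIZE;   /* 3: 8-byte accesses */
    unsigned esz = dtype_esz[17];             /* 4: 128-bit elements */
    (void)msz; (void)esz;
}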
uint32_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs,
@@ -4463,7 +4886,7 @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
}
/* Indexed by [mte][be][dtype][nreg] */
-static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = {
+static gen_helper_gvec_mem * const ldr_fns[2][2][19][4] = {
{ /* mte inactive, little-endian */
{ { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
@@ -4487,7 +4910,13 @@ static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = {
{ gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r,
- gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } },
+ gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r },
+
+ { gen_helper_sve_ld1squ_le_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1dqu_le_r, NULL, NULL, NULL },
+ { NULL, gen_helper_sve_ld2qq_le_r,
+ gen_helper_sve_ld3qq_le_r, gen_helper_sve_ld4qq_le_r },
+ },
/* mte inactive, big-endian */
{ { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
@@ -4512,7 +4941,14 @@ static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = {
{ gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
{ gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r,
- gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } },
+ gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r },
+
+ { gen_helper_sve_ld1squ_be_r, NULL, NULL, NULL },
+ { gen_helper_sve_ld1dqu_be_r, NULL, NULL, NULL },
+ { NULL, gen_helper_sve_ld2qq_be_r,
+ gen_helper_sve_ld3qq_be_r, gen_helper_sve_ld4qq_be_r },
+ },
+ },
{ /* mte active, little-endian */
{ { gen_helper_sve_ld1bb_r_mte,
@@ -4545,7 +4981,15 @@ static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = {
{ gen_helper_sve_ld1dd_le_r_mte,
gen_helper_sve_ld2dd_le_r_mte,
gen_helper_sve_ld3dd_le_r_mte,
- gen_helper_sve_ld4dd_le_r_mte } },
+ gen_helper_sve_ld4dd_le_r_mte },
+
+ { gen_helper_sve_ld1squ_le_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1dqu_le_r_mte, NULL, NULL, NULL },
+ { NULL,
+ gen_helper_sve_ld2qq_le_r_mte,
+ gen_helper_sve_ld3qq_le_r_mte,
+ gen_helper_sve_ld4qq_le_r_mte },
+ },
/* mte active, big-endian */
{ { gen_helper_sve_ld1bb_r_mte,
@@ -4578,7 +5022,16 @@ static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = {
{ gen_helper_sve_ld1dd_be_r_mte,
gen_helper_sve_ld2dd_be_r_mte,
gen_helper_sve_ld3dd_be_r_mte,
- gen_helper_sve_ld4dd_be_r_mte } } },
+ gen_helper_sve_ld4dd_be_r_mte },
+
+ { gen_helper_sve_ld1squ_be_r_mte, NULL, NULL, NULL },
+ { gen_helper_sve_ld1dqu_be_r_mte, NULL, NULL, NULL },
+ { NULL,
+ gen_helper_sve_ld2qq_be_r_mte,
+ gen_helper_sve_ld3qq_be_r_mte,
+ gen_helper_sve_ld4qq_be_r_mte },
+ },
+ },
};
static void do_ld_zpa(DisasContext *s, int zt, int pg,
@@ -4597,9 +5050,32 @@ static void do_ld_zpa(DisasContext *s, int zt, int pg,
static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a)
{
- if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) {
+ if (a->rm == 31) {
return false;
}
+
+ /* dtypes 16-18 are artificial, representing 128-bit elements */
+ switch (a->dtype) {
+ case 0 ... 15:
+ if (!dc_isar_feature(aa64_sve, s)) {
+ return false;
+ }
+ break;
+ case 16: case 17:
+ if (!dc_isar_feature(aa64_sve2p1, s)) {
+ return false;
+ }
+ s->is_nonstreaming = true;
+ break;
+ case 18:
+ if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
+ return false;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
if (sve_access_check(s)) {
TCGv_i64 addr = tcg_temp_new_i64();
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
@@ -4611,9 +5087,28 @@ static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a)
static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a)
{
- if (!dc_isar_feature(aa64_sve, s)) {
- return false;
+ /* dtypes 16-18 are artificial, representing 128-bit elements */
+ switch (a->dtype) {
+ case 0 ... 15:
+ if (!dc_isar_feature(aa64_sve, s)) {
+ return false;
+ }
+ break;
+ case 16: case 17:
+ if (!dc_isar_feature(aa64_sve2p1, s)) {
+ return false;
+ }
+ s->is_nonstreaming = true;
+ break;
+ case 18:
+ if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
+ return false;
+ }
+ break;
+ default:
+ g_assert_not_reached();
}
+
if (sve_access_check(s)) {
int vsz = vec_full_reg_size(s);
int elements = vsz >> dtype_esz[a->dtype];
@@ -5060,7 +5555,7 @@ static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a)
static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
int msz, int esz, int nreg)
{
- static gen_helper_gvec_mem * const fn_single[2][2][4][4] = {
+ static gen_helper_gvec_mem * const fn_single[2][2][4][5] = {
{ { { gen_helper_sve_st1bb_r,
gen_helper_sve_st1bh_r,
gen_helper_sve_st1bs_r,
@@ -5071,9 +5566,11 @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
gen_helper_sve_st1hd_le_r },
{ NULL, NULL,
gen_helper_sve_st1ss_le_r,
- gen_helper_sve_st1sd_le_r },
+ gen_helper_sve_st1sd_le_r,
+ gen_helper_sve_st1sq_le_r, },
{ NULL, NULL, NULL,
- gen_helper_sve_st1dd_le_r } },
+ gen_helper_sve_st1dd_le_r,
+ gen_helper_sve_st1dq_le_r, } },
{ { gen_helper_sve_st1bb_r,
gen_helper_sve_st1bh_r,
gen_helper_sve_st1bs_r,
@@ -5084,9 +5581,11 @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
gen_helper_sve_st1hd_be_r },
{ NULL, NULL,
gen_helper_sve_st1ss_be_r,
- gen_helper_sve_st1sd_be_r },
+ gen_helper_sve_st1sd_be_r,
+ gen_helper_sve_st1sq_be_r },
{ NULL, NULL, NULL,
- gen_helper_sve_st1dd_be_r } } },
+ gen_helper_sve_st1dd_be_r,
+ gen_helper_sve_st1dq_be_r } } },
{ { { gen_helper_sve_st1bb_r_mte,
gen_helper_sve_st1bh_r_mte,
@@ -5098,9 +5597,11 @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
gen_helper_sve_st1hd_le_r_mte },
{ NULL, NULL,
gen_helper_sve_st1ss_le_r_mte,
- gen_helper_sve_st1sd_le_r_mte },
+ gen_helper_sve_st1sd_le_r_mte,
+ gen_helper_sve_st1sq_le_r_mte },
{ NULL, NULL, NULL,
- gen_helper_sve_st1dd_le_r_mte } },
+ gen_helper_sve_st1dd_le_r_mte,
+ gen_helper_sve_st1dq_le_r_mte } },
{ { gen_helper_sve_st1bb_r_mte,
gen_helper_sve_st1bh_r_mte,
gen_helper_sve_st1bs_r_mte,
@@ -5111,59 +5612,73 @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
gen_helper_sve_st1hd_be_r_mte },
{ NULL, NULL,
gen_helper_sve_st1ss_be_r_mte,
- gen_helper_sve_st1sd_be_r_mte },
+ gen_helper_sve_st1sd_be_r_mte,
+ gen_helper_sve_st1sq_be_r_mte },
{ NULL, NULL, NULL,
- gen_helper_sve_st1dd_be_r_mte } } },
+ gen_helper_sve_st1dd_be_r_mte,
+ gen_helper_sve_st1dq_be_r_mte } } },
};
- static gen_helper_gvec_mem * const fn_multiple[2][2][3][4] = {
+ static gen_helper_gvec_mem * const fn_multiple[2][2][3][5] = {
{ { { gen_helper_sve_st2bb_r,
gen_helper_sve_st2hh_le_r,
gen_helper_sve_st2ss_le_r,
- gen_helper_sve_st2dd_le_r },
+ gen_helper_sve_st2dd_le_r,
+ gen_helper_sve_st2qq_le_r },
{ gen_helper_sve_st3bb_r,
gen_helper_sve_st3hh_le_r,
gen_helper_sve_st3ss_le_r,
- gen_helper_sve_st3dd_le_r },
+ gen_helper_sve_st3dd_le_r,
+ gen_helper_sve_st3qq_le_r },
{ gen_helper_sve_st4bb_r,
gen_helper_sve_st4hh_le_r,
gen_helper_sve_st4ss_le_r,
- gen_helper_sve_st4dd_le_r } },
+ gen_helper_sve_st4dd_le_r,
+ gen_helper_sve_st4qq_le_r } },
{ { gen_helper_sve_st2bb_r,
gen_helper_sve_st2hh_be_r,
gen_helper_sve_st2ss_be_r,
- gen_helper_sve_st2dd_be_r },
+ gen_helper_sve_st2dd_be_r,
+ gen_helper_sve_st2qq_be_r },
{ gen_helper_sve_st3bb_r,
gen_helper_sve_st3hh_be_r,
gen_helper_sve_st3ss_be_r,
- gen_helper_sve_st3dd_be_r },
+ gen_helper_sve_st3dd_be_r,
+ gen_helper_sve_st3qq_be_r },
{ gen_helper_sve_st4bb_r,
gen_helper_sve_st4hh_be_r,
gen_helper_sve_st4ss_be_r,
- gen_helper_sve_st4dd_be_r } } },
+ gen_helper_sve_st4dd_be_r,
+ gen_helper_sve_st4qq_be_r } } },
{ { { gen_helper_sve_st2bb_r_mte,
gen_helper_sve_st2hh_le_r_mte,
gen_helper_sve_st2ss_le_r_mte,
- gen_helper_sve_st2dd_le_r_mte },
+ gen_helper_sve_st2dd_le_r_mte,
+ gen_helper_sve_st2qq_le_r_mte },
{ gen_helper_sve_st3bb_r_mte,
gen_helper_sve_st3hh_le_r_mte,
gen_helper_sve_st3ss_le_r_mte,
- gen_helper_sve_st3dd_le_r_mte },
+ gen_helper_sve_st3dd_le_r_mte,
+ gen_helper_sve_st3qq_le_r_mte },
{ gen_helper_sve_st4bb_r_mte,
gen_helper_sve_st4hh_le_r_mte,
gen_helper_sve_st4ss_le_r_mte,
- gen_helper_sve_st4dd_le_r_mte } },
+ gen_helper_sve_st4dd_le_r_mte,
+ gen_helper_sve_st4qq_le_r_mte } },
{ { gen_helper_sve_st2bb_r_mte,
gen_helper_sve_st2hh_be_r_mte,
gen_helper_sve_st2ss_be_r_mte,
- gen_helper_sve_st2dd_be_r_mte },
+ gen_helper_sve_st2dd_be_r_mte,
+ gen_helper_sve_st2qq_be_r_mte },
{ gen_helper_sve_st3bb_r_mte,
gen_helper_sve_st3hh_be_r_mte,
gen_helper_sve_st3ss_be_r_mte,
- gen_helper_sve_st3dd_be_r_mte },
+ gen_helper_sve_st3dd_be_r_mte,
+ gen_helper_sve_st3qq_be_r_mte },
{ gen_helper_sve_st4bb_r_mte,
gen_helper_sve_st4hh_be_r_mte,
gen_helper_sve_st4ss_be_r_mte,
- gen_helper_sve_st4dd_be_r_mte } } },
+ gen_helper_sve_st4dd_be_r_mte,
+ gen_helper_sve_st4qq_be_r_mte } } },
};
gen_helper_gvec_mem *fn;
int be = s->be_data == MO_BE;
@@ -5182,12 +5697,32 @@ static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a)
{
- if (!dc_isar_feature(aa64_sve, s)) {
- return false;
- }
if (a->rm == 31 || a->msz > a->esz) {
return false;
}
+ switch (a->esz) {
+ case MO_8 ... MO_64:
+ if (!dc_isar_feature(aa64_sve, s)) {
+ return false;
+ }
+ break;
+ case MO_128:
+ if (a->nreg == 0) {
+ assert(a->msz < a->esz);
+ if (!dc_isar_feature(aa64_sve2p1, s)) {
+ return false;
+ }
+ s->is_nonstreaming = true;
+ } else {
+ if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
+ return false;
+ }
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
if (sve_access_check(s)) {
TCGv_i64 addr = tcg_temp_new_i64();
tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz);
@@ -5199,12 +5734,32 @@ static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a)
static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a)
{
- if (!dc_isar_feature(aa64_sve, s)) {
- return false;
- }
if (a->msz > a->esz) {
return false;
}
+ switch (a->esz) {
+ case MO_8 ... MO_64:
+ if (!dc_isar_feature(aa64_sve, s)) {
+ return false;
+ }
+ break;
+ case MO_128:
+ if (a->nreg == 0) {
+ assert(a->msz < a->esz);
+ if (!dc_isar_feature(aa64_sve2p1, s)) {
+ return false;
+ }
+ s->is_nonstreaming = true;
+ } else {
+ if (!dc_isar_feature(aa64_sme2p1_or_sve2p1, s)) {
+ return false;
+ }
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
if (sve_access_check(s)) {
int vsz = vec_full_reg_size(s);
int elements = vsz >> a->esz;
@@ -5566,13 +6121,23 @@ gather_load_fn64[2][2][2][3][2][4] = {
gen_helper_sve_ldffdd_be_zd_mte, } } } } },
};
+static gen_helper_gvec_mem_scatter * const
+gather_load_fn128[2][2] = {
+ { gen_helper_sve_ldqq_le_zd,
+ gen_helper_sve_ldqq_be_zd },
+ { gen_helper_sve_ldqq_le_zd_mte,
+ gen_helper_sve_ldqq_be_zd_mte }
+};
+
static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
{
gen_helper_gvec_mem_scatter *fn = NULL;
bool be = s->be_data == MO_BE;
bool mte = s->mte_active[0];
- if (!dc_isar_feature(aa64_sve, s)) {
+ if (a->esz < MO_128
+ ? !dc_isar_feature(aa64_sve, s)
+ : !dc_isar_feature(aa64_sve2p1, s)) {
return false;
}
s->is_nonstreaming = true;
@@ -5587,6 +6152,12 @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
case MO_64:
fn = gather_load_fn64[mte][be][a->ff][a->xs][a->u][a->msz];
break;
+ case MO_128:
+ assert(!a->ff && a->u && a->xs == 2 && a->msz == MO_128);
+ fn = gather_load_fn128[mte][be];
+ break;
+ default:
+ g_assert_not_reached();
}
assert(fn != NULL);
@@ -5754,6 +6325,14 @@ static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][2][3][4] = {
gen_helper_sve_stdd_be_zd_mte, } } },
};
+static gen_helper_gvec_mem_scatter * const
+scatter_store_fn128[2][2] = {
+ { gen_helper_sve_stqq_le_zd,
+ gen_helper_sve_stqq_be_zd },
+ { gen_helper_sve_stqq_le_zd_mte,
+ gen_helper_sve_stqq_be_zd_mte }
+};
+
static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
{
gen_helper_gvec_mem_scatter *fn;
@@ -5763,7 +6342,9 @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
if (a->esz < a->msz || (a->msz == 0 && a->scale)) {
return false;
}
- if (!dc_isar_feature(aa64_sve, s)) {
+ if (a->esz < MO_128
+ ? !dc_isar_feature(aa64_sve, s)
+ : !dc_isar_feature(aa64_sve2p1, s)) {
return false;
}
s->is_nonstreaming = true;
@@ -5777,6 +6358,10 @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
case MO_64:
fn = scatter_store_fn64[mte][be][a->xs][a->msz];
break;
+ case MO_128:
+ assert(a->xs == 2 && a->msz == MO_128);
+ fn = scatter_store_fn128[mte][be];
+ break;
default:
g_assert_not_reached();
}
@@ -5911,6 +6496,7 @@ TRANS_FEAT(MOVPRFX_z, aa64_sve, do_movz_zpz, a->rd, a->rn, a->pg, a->esz, false)
*/
TRANS_FEAT(MUL_zzz, aa64_sve2, gen_gvec_fn_arg_zzz, tcg_gen_gvec_mul, a)
+TRANS_FEAT(SQDMULH_zzz, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_sve2_sqdmulh, a)
static gen_helper_gvec_3 * const smulh_zzz_fns[4] = {
gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h,
@@ -5929,13 +6515,6 @@ TRANS_FEAT(UMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
TRANS_FEAT(PMUL_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
gen_helper_gvec_pmul_b, a, 0)
-static gen_helper_gvec_3 * const sqdmulh_zzz_fns[4] = {
- gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h,
- gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d,
-};
-TRANS_FEAT(SQDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
- sqdmulh_zzz_fns[a->esz], a, 0)
-
static gen_helper_gvec_3 * const sqrdmulh_zzz_fns[4] = {
gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h,
gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d,
@@ -7008,17 +7587,26 @@ DO_ZPZZ_FP(FMINNMP, aa64_sve2, sve2_fminnmp_zpzz)
DO_ZPZZ_FP(FMAXP, aa64_sve2, sve2_fmaxp_zpzz)
DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz)
+static bool do_fmmla(DisasContext *s, arg_rrrr_esz *a,
+ gen_helper_gvec_4_ptr *fn)
+{
+ if (sve_access_check(s)) {
+ if (vec_full_reg_size(s) < 4 * memop_size(a->esz)) {
+ unallocated_encoding(s);
+ } else {
+ gen_gvec_fpst_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, 0, FPST_A64);
+ }
+ }
+ return true;
+}
+
+TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, do_fmmla, a, gen_helper_fmmla_s)
+TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, do_fmmla, a, gen_helper_fmmla_d)
+
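The new size guard in do_fmmla, in numbers (a sketch of the arithmetic behind the vec_full_reg_size check above):

/* FMMLA consumes 2x2 tiles, so a vector must hold at least 4 elements. */
static bool fmmla_vl_ok(int vl_bytes, int esize_bytes)
{
    /* FMMLA_s: 4 * 4 = 16 bytes -- always true, since VL >= 128 bits.
     * FMMLA_d: 4 * 8 = 32 bytes -- demands VL >= 256 bits, otherwise
     * the encoding is treated as unallocated. */
    return vl_bytes >= 4 * esize_bytes;
}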
/*
* SVE Integer Multiply-Add (unpredicated)
*/
-TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz,
- gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra,
- 0, FPST_A64)
-TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz,
- gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra,
- 0, FPST_A64)
-
static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = {
NULL, gen_helper_sve2_sqdmlal_zzzw_h,
gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d,
@@ -7111,8 +7699,13 @@ static gen_helper_gvec_4 * const sqrdcmlah_fns[] = {
TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
sqrdcmlah_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)
-TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
- a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0)
+TRANS_FEAT(USDOT_zzzz_4s, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
+ gen_helper_gvec_usdot_4b, a, 0)
+
+TRANS_FEAT(SDOT_zzzz_2s, aa64_sme2_or_sve2p1, gen_gvec_ool_arg_zzzz,
+ gen_helper_gvec_sdot_2h, a, 0)
+TRANS_FEAT(UDOT_zzzz_2s, aa64_sme2_or_sve2p1, gen_gvec_ool_arg_zzzz,
+ gen_helper_gvec_udot_2h, a, 0)
TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
gen_helper_crypto_aesmc, a->rd, a->rd, 0)
@@ -7174,7 +7767,7 @@ static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel)
{
return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzxw_s,
a->rd, a->rn, a->rm, a->ra,
- (a->index << 2) | (sel << 1) | sub, tcg_env);
+ (a->index << 3) | (sel << 1) | sub, tcg_env);
}
TRANS_FEAT(FMLALB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, false)
@@ -7189,6 +7782,11 @@ TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
gen_helper_gvec_ummla_b, a, 0)
+TRANS_FEAT(FDOT_zzzz, aa64_sme2_or_sve2p1, gen_gvec_env_arg_zzzz,
+ gen_helper_sme2_fdot_h, a, 0)
+TRANS_FEAT(FDOT_zzxz, aa64_sme2_or_sve2p1, gen_gvec_env_arg_zzxz,
+ gen_helper_sme2_fdot_idx_h, a)
+
TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_env_arg_zzzz,
gen_helper_gvec_bfdot, a, 0)
TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_env_arg_zzxz,
@@ -7218,6 +7816,36 @@ static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false)
TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true)
+static bool do_BFMLSL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
+{
+ if (s->fpcr_ah) {
+ return gen_gvec_fpst_zzzz(s, gen_helper_gvec_ah_bfmlsl,
+ a->rd, a->rn, a->rm, a->ra, sel, FPST_AH);
+ } else {
+ return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlsl,
+ a->rd, a->rn, a->rm, a->ra, sel, FPST_A64);
+ }
+}
+
+TRANS_FEAT(BFMLSLB_zzzw, aa64_sme2_or_sve2p1, do_BFMLSL_zzzw, a, false)
+TRANS_FEAT(BFMLSLT_zzzw, aa64_sme2_or_sve2p1, do_BFMLSL_zzzw, a, true)
+
+static bool do_BFMLSL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
+{
+ if (s->fpcr_ah) {
+ return gen_gvec_fpst_zzzz(s, gen_helper_gvec_ah_bfmlsl_idx,
+ a->rd, a->rn, a->rm, a->ra,
+ (a->index << 1) | sel, FPST_AH);
+ } else {
+ return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlsl_idx,
+ a->rd, a->rn, a->rm, a->ra,
+ (a->index << 1) | sel, FPST_A64);
+ }
+}
+
+TRANS_FEAT(BFMLSLB_zzxw, aa64_sme2_or_sve2p1, do_BFMLSL_zzxw, a, false)
+TRANS_FEAT(BFMLSLT_zzxw, aa64_sme2_or_sve2p1, do_BFMLSL_zzxw, a, true)
+
static bool trans_PSEL(DisasContext *s, arg_psel *a)
{
int vl = vec_full_reg_size(s);
@@ -7226,7 +7854,7 @@ static bool trans_PSEL(DisasContext *s, arg_psel *a)
TCGv_i64 tmp, didx, dbit;
TCGv_ptr ptr;
- if (!dc_isar_feature(aa64_sme, s)) {
+ if (!dc_isar_feature(aa64_sme_or_sve2p1, s)) {
return false;
}
if (!sve_access_check(s)) {
@@ -7265,6 +7893,7 @@ static bool trans_PSEL(DisasContext *s, arg_psel *a)
tcg_gen_neg_i64(tmp, tmp);
/* Apply to either copy the source, or write zeros. */
+ pl = size_for_gvec(pl);
tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd),
pred_full_reg_offset(s, a->pn), tmp, pl, pl);
return true;
@@ -7319,7 +7948,7 @@ static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
}
-TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a)
+TRANS_FEAT(SCLAMP, aa64_sme_or_sve2p1, gen_gvec_fn_arg_zzzz, gen_sclamp, a)
static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
{
@@ -7370,4 +7999,136 @@ static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
}
-TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a)
+TRANS_FEAT(UCLAMP, aa64_sme_or_sve2p1, gen_gvec_fn_arg_zzzz, gen_uclamp, a)
+
+static bool trans_FCLAMP(DisasContext *s, arg_FCLAMP *a)
+{
+ static gen_helper_gvec_3_ptr * const fn[] = {
+ gen_helper_sme2_bfclamp,
+ gen_helper_sme2_fclamp_h,
+ gen_helper_sme2_fclamp_s,
+ gen_helper_sme2_fclamp_d,
+ };
+
+ /* This insn uses MO_8 to encode BFloat16. */
+ if (a->esz == MO_8
+ ? !dc_isar_feature(aa64_sve_b16b16, s)
+ : !dc_isar_feature(aa64_sme2_or_sve2p1, s)) {
+ return false;
+ }
+
+ /* So far we never optimize rda with MOVPRFX */
+ assert(a->rd == a->ra);
+ return gen_gvec_fpst_zzz(s, fn[a->esz], a->rd, a->rn, a->rm, 1,
+ a->esz == MO_16 ? FPST_A64_F16 : FPST_A64);
+}
+
+TRANS_FEAT(SQCVTN_sh, aa64_sme2_or_sve2p1, gen_gvec_ool_zz,
+ gen_helper_sme2_sqcvtn_sh, a->rd, a->rn, 0)
+TRANS_FEAT(UQCVTN_sh, aa64_sme2_or_sve2p1, gen_gvec_ool_zz,
+ gen_helper_sme2_uqcvtn_sh, a->rd, a->rn, 0)
+TRANS_FEAT(SQCVTUN_sh, aa64_sme2_or_sve2p1, gen_gvec_ool_zz,
+ gen_helper_sme2_sqcvtun_sh, a->rd, a->rn, 0)
+
+static bool gen_ldst_c(DisasContext *s, TCGv_i64 addr, int zd, int png,
+ MemOp esz, bool is_write, int n, bool strided)
+{
+ typedef void ldst_c_fn(TCGv_env, TCGv_ptr, TCGv_i64,
+ TCGv_i32, TCGv_i32);
+ static ldst_c_fn * const f_ldst[2][2][4] = {
+ { { gen_helper_sve2p1_ld1bb_c,
+ gen_helper_sve2p1_ld1hh_le_c,
+ gen_helper_sve2p1_ld1ss_le_c,
+ gen_helper_sve2p1_ld1dd_le_c, },
+ { gen_helper_sve2p1_ld1bb_c,
+ gen_helper_sve2p1_ld1hh_be_c,
+ gen_helper_sve2p1_ld1ss_be_c,
+ gen_helper_sve2p1_ld1dd_be_c, } },
+
+ { { gen_helper_sve2p1_st1bb_c,
+ gen_helper_sve2p1_st1hh_le_c,
+ gen_helper_sve2p1_st1ss_le_c,
+ gen_helper_sve2p1_st1dd_le_c, },
+ { gen_helper_sve2p1_st1bb_c,
+ gen_helper_sve2p1_st1hh_be_c,
+ gen_helper_sve2p1_st1ss_be_c,
+ gen_helper_sve2p1_st1dd_be_c, } }
+ };
+
+ TCGv_i32 t_png, t_desc;
+ TCGv_ptr t_zd;
+ uint32_t desc, lg2_rstride = 0;
+ bool be = s->be_data == MO_BE;
+
+ assert(n == 2 || n == 4);
+ if (strided) {
+ lg2_rstride = 3;
+ if (n == 4) {
+ /* Validate ZD alignment. */
+ if (zd & 4) {
+ return false;
+ }
+ lg2_rstride = 2;
+ }
+ /* Ignore non-temporal bit */
+ zd &= ~8;
+ }
+
+ if (strided || !dc_isar_feature(aa64_sve2p1, s)
+ ? !sme_sm_enabled_check(s)
+ : !sve_access_check(s)) {
+ return true;
+ }
+
+ if (!s->mte_active[0]) {
+ addr = clean_data_tbi(s, addr);
+ }
+
+ desc = n == 2 ? 0 : 1;
+ desc = desc | (lg2_rstride << 1);
+ desc = make_svemte_desc(s, vec_full_reg_size(s), 1, esz, is_write, desc);
+ t_desc = tcg_constant_i32(desc);
+
+ t_png = tcg_temp_new_i32();
+ tcg_gen_ld16u_i32(t_png, tcg_env,
+ pred_full_reg_offset(s, png) ^
+ (HOST_BIG_ENDIAN ? 6 : 0));
+
+ t_zd = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(t_zd, tcg_env, vec_full_reg_offset(s, zd));
+
+ f_ldst[is_write][be][esz](tcg_env, t_zd, addr, t_png, t_desc);
+ return true;
+}
+
+static bool gen_ldst_zcrr_c(DisasContext *s, arg_zcrr_ldst *a,
+ bool is_write, bool strided)
+{
+ TCGv_i64 addr = tcg_temp_new_i64();
+
+ tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->esz);
+ tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
+ return gen_ldst_c(s, addr, a->rd, a->png, a->esz, is_write,
+ a->nreg, strided);
+}
+
+static bool gen_ldst_zcri_c(DisasContext *s, arg_zcri_ldst *a,
+ bool is_write, bool strided)
+{
+ TCGv_i64 addr = tcg_temp_new_i64();
+
+ tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
+ a->imm * a->nreg * vec_full_reg_size(s));
+ return gen_ldst_c(s, addr, a->rd, a->png, a->esz, is_write,
+ a->nreg, strided);
+}
+
+TRANS_FEAT(LD1_zcrr, aa64_sme2_or_sve2p1, gen_ldst_zcrr_c, a, false, false)
+TRANS_FEAT(LD1_zcri, aa64_sme2_or_sve2p1, gen_ldst_zcri_c, a, false, false)
+TRANS_FEAT(ST1_zcrr, aa64_sme2_or_sve2p1, gen_ldst_zcrr_c, a, true, false)
+TRANS_FEAT(ST1_zcri, aa64_sme2_or_sve2p1, gen_ldst_zcri_c, a, true, false)
+
+TRANS_FEAT(LD1_zcrr_stride, aa64_sme2, gen_ldst_zcrr_c, a, false, true)
+TRANS_FEAT(LD1_zcri_stride, aa64_sme2, gen_ldst_zcri_c, a, false, true)
+TRANS_FEAT(ST1_zcrr_stride, aa64_sme2, gen_ldst_zcrr_c, a, true, true)
+TRANS_FEAT(ST1_zcri_stride, aa64_sme2, gen_ldst_zcri_c, a, true, true)
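The low descriptor bits gen_ldst_c hands to make_svemte_desc encode the register count and stride; a sketch of the packing (the helper-side unpacking is assumed to mirror it):

/* bit 0: 0 => 2 registers, 1 => 4 registers
 * bits 1..: log2 of the register stride, 0 when contiguous */
static unsigned ldst_c_data(unsigned nreg, unsigned lg2_rstride)
{
    return (nreg == 4 ? 1u : 0u) | (lg2_rstride << 1);
}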
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index d280018..f7d6d8c 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -27,6 +27,7 @@
#include "semihosting/semihost.h"
#include "cpregs.h"
#include "exec/helper-proto.h"
+#include "exec/target_page.h"
#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
@@ -371,7 +372,7 @@ static void gen_rebuild_hflags(DisasContext *s, bool new_el)
}
}
-static void gen_exception_internal(int excp)
+void gen_exception_internal(int excp)
{
assert(excp_is_internal(excp));
gen_helper_exception_internal(tcg_env, tcg_constant_i32(excp));
@@ -493,20 +494,9 @@ static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
TCGv_i32 tmp = tcg_temp_new_i32();
- if (tcg_op_supported(INDEX_op_add2_i32, TCG_TYPE_I32, 0)) {
- tcg_gen_movi_i32(tmp, 0);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
- } else {
- TCGv_i64 q0 = tcg_temp_new_i64();
- TCGv_i64 q1 = tcg_temp_new_i64();
- tcg_gen_extu_i32_i64(q0, t0);
- tcg_gen_extu_i32_i64(q1, t1);
- tcg_gen_add_i64(q0, q0, q1);
- tcg_gen_extu_i32_i64(q1, cpu_CF);
- tcg_gen_add_i64(q0, q0, q1);
- tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
- }
+
+ tcg_gen_addcio_i32(cpu_NF, cpu_CF, t0, t1, cpu_CF);
+
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
tcg_gen_xor_i32(tmp, t0, t1);
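tcg_gen_addcio_i32 emits the whole carry chain in one op: sum into the first output, carry-out into the second. A plain-C model of what the deleted add2/extract sequence computed:

#include <stdint.h>

/* NF receives the 32-bit sum, CF the carry out of bit 31. */
static uint32_t adc32(uint32_t t0, uint32_t t1, uint32_t carry_in,
                      uint32_t *carry_out)
{
    uint64_t r = (uint64_t)t0 + t1 + carry_in;
    *carry_out = (uint32_t)(r >> 32);
    return (uint32_t)r;
}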
@@ -7770,7 +7760,8 @@ static bool arm_check_ss_active(DisasContext *dc)
static void arm_post_translate_insn(DisasContext *dc)
{
- if (dc->condjmp && dc->base.is_jmp == DISAS_NEXT) {
+ if (dc->condjmp &&
+ (dc->base.is_jmp == DISAS_NEXT || dc->base.is_jmp == DISAS_TOO_MANY)) {
if (dc->pc_save != dc->condlabel.pc_save) {
gen_update_pc(dc, dc->condlabel.pc_save - dc->pc_save);
}
@@ -7800,7 +7791,7 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
* be possible after an indirect branch, at the start of the TB.
*/
assert(dc->base.num_insns == 1);
- gen_helper_exception_pc_alignment(tcg_env, tcg_constant_tl(pc));
+ gen_helper_exception_pc_alignment(tcg_env, tcg_constant_vaddr(pc));
dc->base.is_jmp = DISAS_NORETURN;
dc->base.pc_next = QEMU_ALIGN_UP(pc, 4);
return;
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index 53e485d..f974996 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -4,7 +4,6 @@
#include "cpu.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
-#include "exec/exec-all.h"
#include "exec/translator.h"
#include "exec/translation-block.h"
#include "exec/helper-gen.h"
@@ -71,8 +70,10 @@ typedef struct DisasContext {
int fp_excp_el; /* FP exception EL or 0 if enabled */
int sve_excp_el; /* SVE exception EL or 0 if enabled */
int sme_excp_el; /* SME exception EL or 0 if enabled */
+ int zt0_excp_el; /* ZT0 exception EL or 0 if enabled */
int vl; /* current vector length in bytes */
int svl; /* current streaming vector length in bytes */
+ int max_svl; /* maximum implemented streaming vector length */
bool vfp_enabled; /* FP enabled via FPSCR.EN */
int vec_len;
int vec_stride;
@@ -209,6 +210,11 @@ static inline int plus_2(DisasContext *s, int x)
return x + 2;
}
+static inline int plus_8(DisasContext *s, int x)
+{
+ return x + 8;
+}
+
static inline int plus_12(DisasContext *s, int x)
{
return x + 12;
@@ -348,6 +354,7 @@ void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
void arm_gen_test_cc(int cc, TCGLabel *label);
MemOp pow2_align(unsigned i);
void unallocated_encoding(DisasContext *s);
+void gen_exception_internal(int excp);
void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
uint32_t syn, uint32_t target_el);
void gen_exception_insn(DisasContext *s, target_long pc_diff,
@@ -636,6 +643,8 @@ typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
uint32_t, uint32_t, uint32_t);
typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
uint32_t, uint32_t, uint32_t);
+typedef void GVecGen3FnVar(unsigned, TCGv_ptr, uint32_t, TCGv_ptr, uint32_t,
+ TCGv_ptr, uint32_t, uint32_t, uint32_t);
/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpFn(TCGv_i32, TCGv_i32);
diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c
index 986eaf8..bae6165 100644
--- a/target/arm/tcg/vec_helper.c
+++ b/target/arm/tcg/vec_helper.c
@@ -825,11 +825,11 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
clear_tail(d, opr_sz, simd_maxsz(desc)); \
}
-DO_DOT(gvec_sdot_b, int32_t, int8_t, int8_t)
-DO_DOT(gvec_udot_b, uint32_t, uint8_t, uint8_t)
-DO_DOT(gvec_usdot_b, uint32_t, uint8_t, int8_t)
-DO_DOT(gvec_sdot_h, int64_t, int16_t, int16_t)
-DO_DOT(gvec_udot_h, uint64_t, uint16_t, uint16_t)
+DO_DOT(gvec_sdot_4b, int32_t, int8_t, int8_t)
+DO_DOT(gvec_udot_4b, uint32_t, uint8_t, uint8_t)
+DO_DOT(gvec_usdot_4b, uint32_t, uint8_t, int8_t)
+DO_DOT(gvec_sdot_4h, int64_t, int16_t, int16_t)
+DO_DOT(gvec_udot_4h, uint64_t, uint16_t, uint16_t)
#define DO_DOT_IDX(NAME, TYPED, TYPEN, TYPEM, HD) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
@@ -865,12 +865,63 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
clear_tail(d, opr_sz, simd_maxsz(desc)); \
}
-DO_DOT_IDX(gvec_sdot_idx_b, int32_t, int8_t, int8_t, H4)
-DO_DOT_IDX(gvec_udot_idx_b, uint32_t, uint8_t, uint8_t, H4)
-DO_DOT_IDX(gvec_sudot_idx_b, int32_t, int8_t, uint8_t, H4)
-DO_DOT_IDX(gvec_usdot_idx_b, int32_t, uint8_t, int8_t, H4)
-DO_DOT_IDX(gvec_sdot_idx_h, int64_t, int16_t, int16_t, H8)
-DO_DOT_IDX(gvec_udot_idx_h, uint64_t, uint16_t, uint16_t, H8)
+DO_DOT_IDX(gvec_sdot_idx_4b, int32_t, int8_t, int8_t, H4)
+DO_DOT_IDX(gvec_udot_idx_4b, uint32_t, uint8_t, uint8_t, H4)
+DO_DOT_IDX(gvec_sudot_idx_4b, int32_t, int8_t, uint8_t, H4)
+DO_DOT_IDX(gvec_usdot_idx_4b, int32_t, uint8_t, int8_t, H4)
+DO_DOT_IDX(gvec_sdot_idx_4h, int64_t, int16_t, int16_t, H8)
+DO_DOT_IDX(gvec_udot_idx_4h, uint64_t, uint16_t, uint16_t, H8)
+
+#undef DO_DOT
+#undef DO_DOT_IDX
+
+/* Similar for 2-way dot product */
+#define DO_DOT(NAME, TYPED, TYPEN, TYPEM) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ TYPED *d = vd, *a = va; \
+ TYPEN *n = vn; \
+ TYPEM *m = vm; \
+ for (i = 0; i < opr_sz / sizeof(TYPED); ++i) { \
+ d[i] = (a[i] + \
+ (TYPED)n[i * 2 + 0] * m[i * 2 + 0] + \
+ (TYPED)n[i * 2 + 1] * m[i * 2 + 1]); \
+ } \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
+}
+
+#define DO_DOT_IDX(NAME, TYPED, TYPEN, TYPEM, HD) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i = 0, opr_sz = simd_oprsz(desc); \
+ intptr_t opr_sz_n = opr_sz / sizeof(TYPED); \
+ intptr_t segend = MIN(16 / sizeof(TYPED), opr_sz_n); \
+ intptr_t index = simd_data(desc); \
+ TYPED *d = vd, *a = va; \
+ TYPEN *n = vn; \
+ TYPEM *m_indexed = (TYPEM *)vm + HD(index) * 2; \
+ do { \
+ TYPED m0 = m_indexed[i * 2 + 0]; \
+ TYPED m1 = m_indexed[i * 2 + 1]; \
+ do { \
+ d[i] = (a[i] + \
+ n[i * 2 + 0] * m0 + \
+ n[i * 2 + 1] * m1); \
+ } while (++i < segend); \
+ segend = i + (16 / sizeof(TYPED)); \
+ } while (i < opr_sz_n); \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
+}
+
+DO_DOT(gvec_sdot_2h, int32_t, int16_t, int16_t)
+DO_DOT(gvec_udot_2h, uint32_t, uint16_t, uint16_t)
+
+DO_DOT_IDX(gvec_sdot_idx_2h, int32_t, int16_t, int16_t, H4)
+DO_DOT_IDX(gvec_udot_idx_2h, uint32_t, uint16_t, uint16_t, H4)
+
+#undef DO_DOT
+#undef DO_DOT_IDX
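For reference, one lane of the 2-way expansion above, written out for gvec_sdot_2h; a sketch that skips the host-endian H-macros and the clear_tail step of the real helper:

#include <stdint.h>
#include <stddef.h>

/* Each 32-bit result element accumulates two adjacent 16-bit products. */
static void sdot_2h_ref(int32_t *d, const int16_t *n, const int16_t *m,
                        const int32_t *a, size_t elems)
{
    for (size_t i = 0; i < elems; ++i) {
        d[i] = a[i]
             + (int32_t)n[i * 2 + 0] * m[i * 2 + 0]
             + (int32_t)n[i * 2 + 1] * m[i * 2 + 1];
    }
}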
void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
float_status *fpst, uint32_t desc)
@@ -1419,10 +1470,12 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, \
DO_3OP(gvec_fadd_h, float16_add, float16)
DO_3OP(gvec_fadd_s, float32_add, float32)
DO_3OP(gvec_fadd_d, float64_add, float64)
+DO_3OP(gvec_bfadd, bfloat16_add, bfloat16)
DO_3OP(gvec_fsub_h, float16_sub, float16)
DO_3OP(gvec_fsub_s, float32_sub, float32)
DO_3OP(gvec_fsub_d, float64_sub, float64)
+DO_3OP(gvec_bfsub, bfloat16_sub, bfloat16)
DO_3OP(gvec_fmul_h, float16_mul, float16)
DO_3OP(gvec_fmul_s, float32_mul, float32)
@@ -1515,6 +1568,13 @@ DO_3OP(gvec_ah_fmin_h, helper_vfp_ah_minh, float16)
DO_3OP(gvec_ah_fmin_s, helper_vfp_ah_mins, float32)
DO_3OP(gvec_ah_fmin_d, helper_vfp_ah_mind, float64)
+DO_3OP(gvec_fmax_b16, bfloat16_max, bfloat16)
+DO_3OP(gvec_fmin_b16, bfloat16_min, bfloat16)
+DO_3OP(gvec_fmaxnum_b16, bfloat16_maxnum, bfloat16)
+DO_3OP(gvec_fminnum_b16, bfloat16_minnum, bfloat16)
+DO_3OP(gvec_ah_fmax_b16, helper_sme2_ah_fmax_b16, bfloat16)
+DO_3OP(gvec_ah_fmin_b16, helper_sme2_ah_fmin_b16, bfloat16)
+
#endif
#undef DO_3OP
@@ -1550,6 +1610,12 @@ static float16 float16_muladd_f(float16 dest, float16 op1, float16 op2,
return float16_muladd(op1, op2, dest, 0, stat);
}
+static bfloat16 bfloat16_muladd_f(bfloat16 dest, bfloat16 op1, bfloat16 op2,
+ float_status *stat)
+{
+ return bfloat16_muladd(op1, op2, dest, 0, stat);
+}
+
static float32 float32_muladd_f(float32 dest, float32 op1, float32 op2,
float_status *stat)
{
@@ -1568,6 +1634,12 @@ static float16 float16_mulsub_f(float16 dest, float16 op1, float16 op2,
return float16_muladd(float16_chs(op1), op2, dest, 0, stat);
}
+static bfloat16 bfloat16_mulsub_f(bfloat16 dest, bfloat16 op1, bfloat16 op2,
+ float_status *stat)
+{
+ return bfloat16_muladd(bfloat16_chs(op1), op2, dest, 0, stat);
+}
+
static float32 float32_mulsub_f(float32 dest, float32 op1, float32 op2,
float_status *stat)
{
@@ -1586,6 +1658,12 @@ static float16 float16_ah_mulsub_f(float16 dest, float16 op1, float16 op2,
return float16_muladd(op1, op2, dest, float_muladd_negate_product, stat);
}
+static bfloat16 bfloat16_ah_mulsub_f(bfloat16 dest, bfloat16 op1, bfloat16 op2,
+ float_status *stat)
+{
+ return bfloat16_muladd(op1, op2, dest, float_muladd_negate_product, stat);
+}
+
static float32 float32_ah_mulsub_f(float32 dest, float32 op1, float32 op2,
float_status *stat)
{
@@ -1610,23 +1688,28 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, \
clear_tail(d, oprsz, simd_maxsz(desc)); \
}
-DO_MULADD(gvec_fmla_h, float16_muladd_nf, float16)
-DO_MULADD(gvec_fmla_s, float32_muladd_nf, float32)
+DO_MULADD(gvec_fmla_nf_h, float16_muladd_nf, float16)
+DO_MULADD(gvec_fmla_nf_s, float32_muladd_nf, float32)
-DO_MULADD(gvec_fmls_h, float16_mulsub_nf, float16)
-DO_MULADD(gvec_fmls_s, float32_mulsub_nf, float32)
+DO_MULADD(gvec_fmls_nf_h, float16_mulsub_nf, float16)
+DO_MULADD(gvec_fmls_nf_s, float32_mulsub_nf, float32)
DO_MULADD(gvec_vfma_h, float16_muladd_f, float16)
DO_MULADD(gvec_vfma_s, float32_muladd_f, float32)
DO_MULADD(gvec_vfma_d, float64_muladd_f, float64)
+DO_MULADD(gvec_bfmla, bfloat16_muladd_f, bfloat16)
DO_MULADD(gvec_vfms_h, float16_mulsub_f, float16)
DO_MULADD(gvec_vfms_s, float32_mulsub_f, float32)
DO_MULADD(gvec_vfms_d, float64_mulsub_f, float64)
+DO_MULADD(gvec_bfmls, bfloat16_mulsub_f, bfloat16)
DO_MULADD(gvec_ah_vfms_h, float16_ah_mulsub_f, float16)
DO_MULADD(gvec_ah_vfms_s, float32_ah_mulsub_f, float32)
DO_MULADD(gvec_ah_vfms_d, float64_ah_mulsub_f, float64)
+DO_MULADD(gvec_ah_bfmls, bfloat16_ah_mulsub_f, bfloat16)
+
+#undef DO_MULADD
/* For the indexed ops, SVE applies the index per 128-bit vector segment.
* For AdvSIMD, there is of course only one such vector segment.
@@ -1745,14 +1828,17 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, \
DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2, 0, 0)
DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4, 0, 0)
DO_FMLA_IDX(gvec_fmla_idx_d, float64, H8, 0, 0)
+DO_FMLA_IDX(gvec_bfmla_idx, bfloat16, H2, 0, 0)
DO_FMLA_IDX(gvec_fmls_idx_h, float16, H2, INT16_MIN, 0)
DO_FMLA_IDX(gvec_fmls_idx_s, float32, H4, INT32_MIN, 0)
DO_FMLA_IDX(gvec_fmls_idx_d, float64, H8, INT64_MIN, 0)
+DO_FMLA_IDX(gvec_bfmls_idx, bfloat16, H2, INT16_MIN, 0)
DO_FMLA_IDX(gvec_ah_fmls_idx_h, float16, H2, 0, float_muladd_negate_product)
DO_FMLA_IDX(gvec_ah_fmls_idx_s, float32, H4, 0, float_muladd_negate_product)
DO_FMLA_IDX(gvec_ah_fmls_idx_d, float64, H8, 0, float_muladd_negate_product)
+DO_FMLA_IDX(gvec_ah_bfmls_idx, bfloat16, H2, 0, float_muladd_negate_product)
#undef DO_FMLA_IDX
@@ -2184,7 +2270,8 @@ void HELPER(sve2_fmlal_zzzw_s)(void *vd, void *vn, void *vm, void *va,
intptr_t i, oprsz = simd_oprsz(desc);
bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
- float_status *status = &env->vfp.fp_status[FPST_A64];
+ bool za = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ float_status *status = &env->vfp.fp_status[za ? FPST_ZA : FPST_A64];
bool fz16 = env->vfp.fpcr & FPCR_FZ16;
int negx = 0, negf = 0;
@@ -2267,8 +2354,9 @@ void HELPER(sve2_fmlal_zzxw_s)(void *vd, void *vn, void *vm, void *va,
intptr_t i, j, oprsz = simd_oprsz(desc);
bool is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 1, 1) * sizeof(float16);
- intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 2, 3) * sizeof(float16);
- float_status *status = &env->vfp.fp_status[FPST_A64];
+ bool za = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ intptr_t idx = extract32(desc, SIMD_DATA_SHIFT + 3, 3) * sizeof(float16);
+ float_status *status = &env->vfp.fp_status[za ? FPST_ZA : FPST_A64];
bool fz16 = env->vfp.fpcr & FPCR_FZ16;
int negx = 0, negf = 0;
@@ -2989,31 +3077,62 @@ float32 bfdotadd(float32 sum, uint32_t e1, uint32_t e2, float_status *fpst)
float32 bfdotadd_ebf(float32 sum, uint32_t e1, uint32_t e2,
float_status *fpst, float_status *fpst_odd)
{
- /*
- * Compare f16_dotadd() in sme_helper.c, but here we have
- * bfloat16 inputs. In particular that means that we do not
- * want the FPCR.FZ16 flush semantics, so we use the normal
- * float_status for the input handling here.
- */
- float64 e1r = float32_to_float64(e1 << 16, fpst);
- float64 e1c = float32_to_float64(e1 & 0xffff0000u, fpst);
- float64 e2r = float32_to_float64(e2 << 16, fpst);
- float64 e2c = float32_to_float64(e2 & 0xffff0000u, fpst);
- float64 t64;
+ float32 s1r = e1 << 16;
+ float32 s1c = e1 & 0xffff0000u;
+ float32 s2r = e2 << 16;
+ float32 s2c = e2 & 0xffff0000u;
float32 t32;
- /*
- * The ARM pseudocode function FPDot performs both multiplies
- * and the add with a single rounding operation. Emulate this
- * by performing the first multiply in round-to-odd, then doing
- * the second multiply as fused multiply-add, and rounding to
- * float32 all in one step.
- */
- t64 = float64_mul(e1r, e2r, fpst_odd);
- t64 = float64r32_muladd(e1c, e2c, t64, 0, fpst);
+ /* Cf. FPProcessNaNs4 */
+ if (float32_is_any_nan(s1r) || float32_is_any_nan(s1c) ||
+ float32_is_any_nan(s2r) || float32_is_any_nan(s2c)) {
+ if (float32_is_signaling_nan(s1r, fpst)) {
+ t32 = s1r;
+ } else if (float32_is_signaling_nan(s1c, fpst)) {
+ t32 = s1c;
+ } else if (float32_is_signaling_nan(s2r, fpst)) {
+ t32 = s2r;
+ } else if (float32_is_signaling_nan(s2c, fpst)) {
+ t32 = s2c;
+ } else if (float32_is_any_nan(s1r)) {
+ t32 = s1r;
+ } else if (float32_is_any_nan(s1c)) {
+ t32 = s1c;
+ } else if (float32_is_any_nan(s2r)) {
+ t32 = s2r;
+ } else {
+ t32 = s2c;
+ }
+ /*
+ * FPConvertNaN(FPProcessNaN(t32)) will be done as part
+ * of the final addition below.
+ */
+ } else {
+ /*
+ * Compare f16_dotadd() in sme_helper.c, but here we have
+ * bfloat16 inputs. In particular that means that we do not
+ * want the FPCR.FZ16 flush semantics, so we use the normal
+ * float_status for the input handling here.
+ */
+ float64 e1r = float32_to_float64(s1r, fpst);
+ float64 e1c = float32_to_float64(s1c, fpst);
+ float64 e2r = float32_to_float64(s2r, fpst);
+ float64 e2c = float32_to_float64(s2c, fpst);
+ float64 t64;
+
+ /*
+ * The ARM pseudocode function FPDot performs both multiplies
+ * and the add with a single rounding operation. Emulate this
+ * by performing the first multiply in round-to-odd, then doing
+ * the second multiply as fused multiply-add, and rounding to
+ * float32 all in one step.
+ */
+ t64 = float64_mul(e1r, e2r, fpst_odd);
+ t64 = float64r32_muladd(e1c, e2c, t64, 0, fpst);
- /* This conversion is exact, because we've already rounded. */
- t32 = float64_to_float32(t64, fpst);
+ /* This conversion is exact, because we've already rounded. */
+ t32 = float64_to_float32(t64, fpst);
+ }
/* The final accumulation step is not fused. */
return float32_add(sum, t32, fpst);
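The round-to-odd detour deserves a gloss. bfloat16 has 8 significand bits, so each product is exact in float64; the hazard is entirely in the narrowing. A naive model (sketch, NaN handling omitted) rounds twice, which is exactly what the float64_mul-in-round-to-odd plus float64r32_muladd sequence above avoids:

#include <stdint.h>
#include <string.h>

/* bf16 -> double is exact: bf16 is the top half of a float32. */
static double bf16_to_double(uint16_t h)
{
    uint32_t w = (uint32_t)h << 16;
    float f;
    memcpy(&f, &w, sizeof(f));
    return f;
}

/* Each product needs at most 16 significand bits, so it is exact in
 * double.  But the addition rounds to 53 bits and the cast rounds
 * again to 24 -- two roundings, where FPDot specifies exactly one. */
static float bfdot_naive(float sum, uint16_t n0, uint16_t n1,
                         uint16_t m0, uint16_t m1)
{
    double p = bf16_to_double(n0) * bf16_to_double(m0)
             + bf16_to_double(n1) * bf16_to_double(m1);
    return sum + (float)p;
}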
@@ -3070,6 +3189,45 @@ void HELPER(gvec_bfdot_idx)(void *vd, void *vn, void *vm,
clear_tail(d, opr_sz, simd_maxsz(desc));
}
+void HELPER(sme2_bfvdot_idx)(void *vd, void *vn, void *vm,
+ void *va, CPUARMState *env, uint32_t desc)
+{
+ intptr_t i, j, opr_sz = simd_oprsz(desc);
+ intptr_t idx = extract32(desc, SIMD_DATA_SHIFT, 2);
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT + 2, 1);
+ intptr_t elements = opr_sz / 4;
+ intptr_t eltspersegment = MIN(16 / 4, elements);
+ float32 *d = vd, *a = va;
+ uint16_t *n0 = vn;
+ uint16_t *n1 = vn + sizeof(ARMVectorReg);
+ uint32_t *m = vm;
+ float_status fpst, fpst_odd;
+
+ if (is_ebf(env, &fpst, &fpst_odd)) {
+ for (i = 0; i < elements; i += eltspersegment) {
+ uint32_t m_idx = m[i + H4(idx)];
+
+ for (j = 0; j < eltspersegment; j++) {
+ uint32_t nn = (n0[H2(2 * (i + j) + sel)])
+ | (n1[H2(2 * (i + j) + sel)] << 16);
+ d[i + H4(j)] = bfdotadd_ebf(a[i + H4(j)], nn, m_idx,
+ &fpst, &fpst_odd);
+ }
+ }
+ } else {
+ for (i = 0; i < elements; i += eltspersegment) {
+ uint32_t m_idx = m[i + H4(idx)];
+
+ for (j = 0; j < eltspersegment; j++) {
+ uint32_t nn = (n0[H2(2 * (i + j) + sel)])
+ | (n1[H2(2 * (i + j) + sel)] << 16);
+ d[i + H4(j)] = bfdotadd(a[i + H4(j)], nn, m_idx, &fpst);
+ }
+ }
+ }
+ clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va,
CPUARMState *env, uint32_t desc)
{
@@ -3146,44 +3304,76 @@ void HELPER(gvec_bfmmla)(void *vd, void *vn, void *vm, void *va,
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_bfmlal)(void *vd, void *vn, void *vm, void *va,
- float_status *stat, uint32_t desc)
+static void do_bfmlal(float32 *d, bfloat16 *n, bfloat16 *m, float32 *a,
+ float_status *stat, uint32_t desc, int negx, int negf)
{
intptr_t i, opr_sz = simd_oprsz(desc);
- intptr_t sel = simd_data(desc);
- float32 *d = vd, *a = va;
- bfloat16 *n = vn, *m = vm;
+ intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1);
for (i = 0; i < opr_sz / 4; ++i) {
- float32 nn = n[H2(i * 2 + sel)] << 16;
+ float32 nn = (negx ^ n[H2(i * 2 + sel)]) << 16;
float32 mm = m[H2(i * 2 + sel)] << 16;
- d[H4(i)] = float32_muladd(nn, mm, a[H4(i)], 0, stat);
+ d[H4(i)] = float32_muladd(nn, mm, a[H4(i)], negf, stat);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm,
- void *va, float_status *stat, uint32_t desc)
+void HELPER(gvec_bfmlal)(void *vd, void *vn, void *vm, void *va,
+ float_status *stat, uint32_t desc)
+{
+ do_bfmlal(vd, vn, vm, va, stat, desc, 0, 0);
+}
+
+void HELPER(gvec_bfmlsl)(void *vd, void *vn, void *vm, void *va,
+ float_status *stat, uint32_t desc)
+{
+ do_bfmlal(vd, vn, vm, va, stat, desc, 0x8000, 0);
+}
+
+void HELPER(gvec_ah_bfmlsl)(void *vd, void *vn, void *vm, void *va,
+ float_status *stat, uint32_t desc)
+{
+ do_bfmlal(vd, vn, vm, va, stat, desc, 0, float_muladd_negate_product);
+}
+
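The negx parameter is a bf16 sign-bit mask: XOR with 0x8000 negates every element before it is widened, NaNs included (the FPCR.AH=0 behavior), whereas the AH variant passes the bits through and has the fused multiply-add negate the product, so NaN inputs survive unnegated. As a one-line sketch:

/* Negated bf16 operand, widened to its float32 bit pattern. */
static inline uint32_t bf16_widen_negated(uint16_t n)
{
    return (uint32_t)(n ^ 0x8000) << 16;
}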
+static void do_bfmlal_idx(float32 *d, bfloat16 *n, bfloat16 *m, float32 *a,
+ float_status *stat, uint32_t desc, int negx, int negf)
{
intptr_t i, j, opr_sz = simd_oprsz(desc);
intptr_t sel = extract32(desc, SIMD_DATA_SHIFT, 1);
intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 1, 3);
intptr_t elements = opr_sz / 4;
intptr_t eltspersegment = MIN(16 / 4, elements);
- float32 *d = vd, *a = va;
- bfloat16 *n = vn, *m = vm;
for (i = 0; i < elements; i += eltspersegment) {
float32 m_idx = m[H2(2 * i + index)] << 16;
for (j = i; j < i + eltspersegment; j++) {
- float32 n_j = n[H2(2 * j + sel)] << 16;
- d[H4(j)] = float32_muladd(n_j, m_idx, a[H4(j)], 0, stat);
+ float32 n_j = (negx ^ n[H2(2 * j + sel)]) << 16;
+ d[H4(j)] = float32_muladd(n_j, m_idx, a[H4(j)], negf, stat);
}
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
+void HELPER(gvec_bfmlal_idx)(void *vd, void *vn, void *vm, void *va,
+ float_status *stat, uint32_t desc)
+{
+ do_bfmlal_idx(vd, vn, vm, va, stat, desc, 0, 0);
+}
+
+void HELPER(gvec_bfmlsl_idx)(void *vd, void *vn, void *vm, void *va,
+ float_status *stat, uint32_t desc)
+{
+ do_bfmlal_idx(vd, vn, vm, va, stat, desc, 0x8000, 0);
+}
+
+void HELPER(gvec_ah_bfmlsl_idx)(void *vd, void *vn, void *vm, void *va,
+ float_status *stat, uint32_t desc)
+{
+ do_bfmlal_idx(vd, vn, vm, va, stat, desc, 0, float_muladd_negate_product);
+}
+
#define DO_CLAMP(NAME, TYPE) \
void HELPER(NAME)(void *d, void *n, void *m, void *a, uint32_t desc) \
{ \
@@ -3253,3 +3443,90 @@ void HELPER(gvec_ursqrte_s)(void *vd, void *vn, uint32_t desc)
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
+
+static inline void do_lut_b(void *zd, uint64_t *indexes, uint64_t *table,
+ unsigned elements, unsigned segbase,
+ unsigned dstride, unsigned isize,
+ unsigned tsize, unsigned nreg)
+{
+ for (unsigned r = 0; r < nreg; ++r) {
+ uint8_t *dst = zd + dstride * r;
+ unsigned base = segbase + r * elements;
+
+ for (unsigned e = 0; e < elements; ++e) {
+ unsigned index = extractn(indexes, (base + e) * isize, isize);
+ dst[H1(e)] = extractn(table, index * tsize, 8);
+ }
+ }
+}
+
+static inline void do_lut_h(void *zd, uint64_t *indexes, uint64_t *table,
+ unsigned elements, unsigned segbase,
+ unsigned dstride, unsigned isize,
+ unsigned tsize, unsigned nreg)
+{
+ for (unsigned r = 0; r < nreg; ++r) {
+ uint16_t *dst = zd + dstride * r;
+ unsigned base = segbase + r * elements;
+
+ for (unsigned e = 0; e < elements; ++e) {
+ unsigned index = extractn(indexes, (base + e) * isize, isize);
+ dst[H2(e)] = extractn(table, index * tsize, 16);
+ }
+ }
+}
+
+static inline void do_lut_s(void *zd, uint64_t *indexes, uint32_t *table,
+ unsigned elements, unsigned segbase,
+ unsigned dstride, unsigned isize,
+ unsigned tsize, unsigned nreg)
+{
+ for (unsigned r = 0; r < nreg; ++r) {
+ uint32_t *dst = zd + dstride * r;
+ unsigned base = segbase + r * elements;
+
+ for (unsigned e = 0; e < elements; ++e) {
+ unsigned index = extractn(indexes, (base + e) * isize, isize);
+ dst[H4(e)] = table[H4(index)];
+ }
+ }
+}
+
+#define DO_SME2_LUT(ISIZE, NREG, SUFF, ESIZE) \
+void helper_sme2_luti##ISIZE##_##NREG##SUFF \
+ (void *zd, void *zn, CPUARMState *env, uint32_t desc) \
+{ \
+ unsigned vl = simd_oprsz(desc); \
+ unsigned strided = extract32(desc, SIMD_DATA_SHIFT, 1); \
+ unsigned idx = extract32(desc, SIMD_DATA_SHIFT + 1, 4); \
+ unsigned elements = vl / ESIZE; \
+ unsigned dstride = (!strided ? 1 : NREG == 4 ? 4 : 8); \
+ unsigned segments = (ESIZE * 8) / (ISIZE * NREG); \
+ unsigned segment = idx & (segments - 1); \
+ ARMVectorReg indexes; \
+ memcpy(&indexes, zn, vl); \
+ do_lut_##SUFF(zd, indexes.d, (void *)env->za_state.zt0, elements, \
+ segment * NREG * elements, \
+ dstride * sizeof(ARMVectorReg), ISIZE, 32, NREG); \
+}
+
+DO_SME2_LUT(2,1,b, 1)
+DO_SME2_LUT(2,1,h, 2)
+DO_SME2_LUT(2,1,s, 4)
+DO_SME2_LUT(2,2,b, 1)
+DO_SME2_LUT(2,2,h, 2)
+DO_SME2_LUT(2,2,s, 4)
+DO_SME2_LUT(2,4,b, 1)
+DO_SME2_LUT(2,4,h, 2)
+DO_SME2_LUT(2,4,s, 4)
+
+DO_SME2_LUT(4,1,b, 1)
+DO_SME2_LUT(4,1,h, 2)
+DO_SME2_LUT(4,1,s, 4)
+DO_SME2_LUT(4,2,b, 1)
+DO_SME2_LUT(4,2,h, 2)
+DO_SME2_LUT(4,2,s, 4)
+DO_SME2_LUT(4,4,h, 2)
+DO_SME2_LUT(4,4,s, 4)
+
+#undef DO_SME2_LUT
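
To make the macro plumbing concrete, here is DO_SME2_LUT(2,1,b, 1) expanded by hand (illustrative; the real function is generated by the preprocessor). Each 2-bit index selects one 32-bit element of ZT0 (tsize = 32) and do_lut_b keeps its low byte; with byte destination elements a single vector consumes only a quarter of the index bits, so idx picks one of segments = 8/2 = 4 segments:

    void helper_sme2_luti2_1b(void *zd, void *zn, CPUARMState *env,
                              uint32_t desc)
    {
        unsigned vl = simd_oprsz(desc);
        unsigned strided = extract32(desc, SIMD_DATA_SHIFT, 1);
        unsigned idx = extract32(desc, SIMD_DATA_SHIFT + 1, 4);
        unsigned elements = vl / 1;
        unsigned dstride = (!strided ? 1 : 8);   /* NREG != 4 */
        unsigned segments = (1 * 8) / (2 * 1);
        unsigned segment = idx & (segments - 1);
        ARMVectorReg indexes;

        memcpy(&indexes, zn, vl);
        do_lut_b(zd, indexes.d, (void *)env->za_state.zt0, elements,
                 segment * 1 * elements,
                 dstride * sizeof(ARMVectorReg), 2, 32, 1);
    }
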
diff --git a/target/arm/tcg/vec_internal.h b/target/arm/tcg/vec_internal.h
index 6b93b5a..cf41b03 100644
--- a/target/arm/tcg/vec_internal.h
+++ b/target/arm/tcg/vec_internal.h
@@ -22,6 +22,8 @@
#include "fpu/softfloat.h"
+typedef struct CPUArchState CPUARMState;
+
/*
* Note that vector data is stored in host-endian 64-bit chunks,
* so addressing units smaller than that needs a host-endian fixup.
@@ -221,6 +223,34 @@ int16_t do_sqrdmlah_h(int16_t, int16_t, int16_t, bool, bool, uint32_t *);
int32_t do_sqrdmlah_s(int32_t, int32_t, int32_t, bool, bool, uint32_t *);
int64_t do_sqrdmlah_d(int64_t, int64_t, int64_t, bool, bool);
+#define do_ssat_b(val) MIN(MAX(val, INT8_MIN), INT8_MAX)
+#define do_ssat_h(val) MIN(MAX(val, INT16_MIN), INT16_MAX)
+#define do_ssat_s(val) MIN(MAX(val, INT32_MIN), INT32_MAX)
+#define do_usat_b(val) MIN(MAX(val, 0), UINT8_MAX)
+#define do_usat_h(val) MIN(MAX(val, 0), UINT16_MAX)
+#define do_usat_s(val) MIN(MAX(val, 0), UINT32_MAX)
+
+static inline uint64_t do_urshr(uint64_t x, unsigned sh)
+{
+ if (likely(sh < 64)) {
+ return (x >> sh) + ((x >> (sh - 1)) & 1);
+ } else if (sh == 64) {
+ return x >> 63;
+ } else {
+ return 0;
+ }
+}
+
+static inline int64_t do_srshr(int64_t x, unsigned sh)
+{
+ if (likely(sh < 64)) {
+ return (x >> sh) + ((x >> (sh - 1)) & 1);
+ } else {
+ /* Rounding the sign bit always produces 0. */
+ return 0;
+ }
+}
+
/**
* bfdotadd:
* @sum: addend
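
The two rounding shifts above add back the last bit shifted out, i.e. they round to nearest with ties going up (toward +Inf). A standalone sanity check with local copies of the sh < 64 path (illustrative only):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t urshr(uint64_t x, unsigned sh)  /* as do_urshr, sh < 64 */
    {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    }

    static int64_t srshr(int64_t x, unsigned sh)    /* as do_srshr, sh < 64 */
    {
        return (x >> sh) + ((x >> (sh - 1)) & 1);
    }

    int main(void)
    {
        assert(urshr(5, 2) == 1);    /*  1.25 ->  1 */
        assert(urshr(6, 2) == 2);    /*  1.50 ->  2, tie rounds up */
        assert(urshr(7, 2) == 2);    /*  1.75 ->  2 */
        assert(srshr(-6, 2) == -1);  /* -1.50 -> -1, tie toward +Inf */
        assert(srshr(-7, 2) == -2);  /* -1.75 -> -2 */
        return 0;
    }
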
@@ -270,6 +300,11 @@ bool is_ebf(CPUARMState *env, float_status *statusp, float_status *oddstatusp);
/*
* Negate as for FPCR.AH=1 -- do not negate NaNs.
*/
+static inline float16 bfloat16_ah_chs(float16 a)
+{
+ return bfloat16_is_any_nan(a) ? a : bfloat16_chs(a);
+}
+
static inline float16 float16_ah_chs(float16 a)
{
return float16_is_any_nan(a) ? a : float16_chs(a);
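
For the new bfloat16 variant, the AH rule is easiest to see on concrete bit patterns (illustrative example values: 0x3f80 is bfloat16 1.0, 0x7fc0 a quiet NaN):

    assert(bfloat16_ah_chs(0x3f80) == 0xbf80);   /* 1.0 -> -1.0 */
    assert(bfloat16_ah_chs(0x7fc0) == 0x7fc0);   /* NaN passes through */
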
@@ -300,4 +335,119 @@ static inline float64 float64_maybe_ah_chs(float64 a, bool fpcr_ah)
return fpcr_ah && float64_is_any_nan(a) ? a : float64_chs(a);
}
+/* Not actually called directly as a helper, but uses similar machinery. */
+bfloat16 helper_sme2_ah_fmax_b16(bfloat16 a, bfloat16 b, float_status *fpst);
+bfloat16 helper_sme2_ah_fmin_b16(bfloat16 a, bfloat16 b, float_status *fpst);
+
+float32 sve_f16_to_f32(float16 f, float_status *fpst);
+float16 sve_f32_to_f16(float32 f, float_status *fpst);
+
+/*
+ * Decode helper functions for predicate as counter.
+ */
+
+typedef struct {
+ unsigned count;
+ unsigned lg2_stride;
+ bool invert;
+} DecodeCounter;
+
+static inline DecodeCounter
+decode_counter(unsigned png, unsigned vl, unsigned v_esz)
+{
+ DecodeCounter ret = { };
+
+ /* Cf. Arm pseudocode CounterToPredicate. */
+ if (likely(png & 0xf)) {
+ unsigned p_esz = ctz32(png);
+
+ /*
+ * maxbit = log2(pl(bits) * 4)
+ * = log2(vl(bytes) * 4)
+ * = log2(vl) + 2
+ * maxbit_mask = ones<maxbit:0>
+ * = (1 << (maxbit + 1)) - 1
+ * = (1 << (log2(vl) + 2 + 1)) - 1
+ * = (1 << (log2(vl) + 3)) - 1
+ * = (pow2ceil(vl) << 3) - 1
+ */
+ ret.count = png & (((unsigned)pow2ceil(vl) << 3) - 1);
+ ret.count >>= p_esz + 1;
+
+ ret.invert = (png >> 15) & 1;
+
+ /*
+ * The Arm pseudocode for CounterToPredicate expands the count to
+ * a set of bits, and then the operation proceeds as for the original
+ * interpretation of predicates as a set of bits.
+ *
+ * We can avoid the expansion by adjusting the count and supplying
+ * an element stride.
+ */
+ if (unlikely(p_esz != v_esz)) {
+ if (p_esz < v_esz) {
+ /*
+ * For predicate esz < vector esz, the expanded predicate
+ * will have more bits set than will be consumed.
+ * Adjust the count down, rounding up.
+ * Consider p_esz = MO_8, v_esz = MO_64, count 14:
+ * The expanded predicate would be
+ * 0011 1111 1111 1111
+ * The significant bits are
+ * ...1 ...1 ...1 ...1
+ */
+ unsigned shift = v_esz - p_esz;
+ unsigned trunc = ret.count >> shift;
+ ret.count = trunc + (ret.count != (trunc << shift));
+ } else {
+ /*
+ * For predicate esz > vector esz, the expanded predicate
+ * will have bits set only at power-of-two multiples of
+ * the vector esz. Bits at other multiples will all be
+ * false. Adjust the count up, and supply the caller
+ * with a stride of elements to skip.
+ */
+ unsigned shift = p_esz - v_esz;
+ ret.count <<= shift;
+ ret.lg2_stride = shift;
+ }
+ }
+ }
+ return ret;
+}
+
+/* Extract @len bits from an array of uint64_t at offset @pos bits. */
+static inline uint64_t extractn(uint64_t *p, unsigned pos, unsigned len)
+{
+ uint64_t x;
+
+ p += pos / 64;
+ pos = pos % 64;
+
+ x = p[0];
+ if (pos + len > 64) {
+ x = (x >> pos) | (p[1] << (-pos & 63));
+ pos = 0;
+ }
+ return extract64(x, pos, len);
+}
+
+/* Deposit @len bits into an array of uint64_t at offset @pos bits. */
+static inline void depositn(uint64_t *p, unsigned pos,
+ unsigned len, uint64_t val)
+{
+ p += pos / 64;
+ pos = pos % 64;
+
+ if (pos + len <= 64) {
+ p[0] = deposit64(p[0], pos, len, val);
+ } else {
+ unsigned len0 = 64 - pos;
+ unsigned len1 = len - len0;
+
+ p[0] = deposit64(p[0], pos, len0, val);
+ p[1] = deposit64(p[1], 0, len1, val >> len0);
+ }
+}
+
#endif /* TARGET_ARM_VEC_INTERNAL_H */
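
A round-trip sketch for extractn()/depositn(), exercising the two-word split path (illustrative; assumes QEMU's extract64/deposit64 from qemu/bitops.h are in scope):

    uint64_t words[2] = { 0, 0 };

    /* A 12-bit field at bit offset 58 spans words[0] and words[1]:
     * len0 = 6 bits land in words[0], the remaining 6 in words[1]. */
    depositn(words, 58, 12, 0xabc);
    assert(words[1] == (0xabc >> 6));
    assert(extractn(words, 58, 12) == 0xabc);
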
diff --git a/target/arm/tcg/vfp_helper.c b/target/arm/tcg/vfp_helper.c
index b32e2f4..e156e37 100644
--- a/target/arm/tcg/vfp_helper.c
+++ b/target/arm/tcg/vfp_helper.c
@@ -19,12 +19,14 @@
#include "qemu/osdep.h"
#include "cpu.h"
-#include "exec/helper-proto.h"
#include "internals.h"
#include "cpu-features.h"
#include "fpu/softfloat.h"
#include "qemu/log.h"
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
+
/*
* Set the float_status behaviour to match the Arm defaults:
* * tininess-before-rounding
@@ -121,7 +123,7 @@ uint32_t vfp_get_fpsr_from_host(CPUARMState *env)
a64_flags |= (get_float_exception_flags(&env->vfp.fp_status[FPST_A64_F16])
& ~(float_flag_input_denormal_flushed | float_flag_input_denormal_used));
/*
- * We do not merge in flags from FPST_AH or FPST_AH_F16, because
+ * We do not merge in flags from FPST_{AH,ZA} or FPST_{AH,ZA}_F16, because
* they are used for insns that must not set the cumulative exception bits.
*/
@@ -194,6 +196,8 @@ void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask)
set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A64]);
set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A32_F16]);
set_float_rounding_mode(i, &env->vfp.fp_status[FPST_A64_F16]);
+ set_float_rounding_mode(i, &env->vfp.fp_status[FPST_ZA]);
+ set_float_rounding_mode(i, &env->vfp.fp_status[FPST_ZA_F16]);
}
if (changed & FPCR_FZ16) {
bool ftz_enabled = val & FPCR_FZ16;
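
For context, the rounding-mode index i propagated to the new FPST_ZA statuses above is derived earlier in vfp_set_fpcr_to_host() from FPCR.RMode (bits [23:22]). The code that computes it is outside this hunk, so treat this as a sketch of the architectural mapping, using softfloat's mode names:

    int i;

    switch (extract32(val, 22, 2)) {   /* FPCR.RMode */
    case 0: i = float_round_nearest_even; break;  /* RN */
    case 1: i = float_round_up; break;            /* RP */
    case 2: i = float_round_down; break;          /* RM */
    case 3: i = float_round_to_zero; break;       /* RZ */
    }
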
@@ -201,15 +205,18 @@ void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask)
set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64_F16]);
set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]);
set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_AH_F16]);
+ set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_ZA_F16]);
set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32_F16]);
set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64_F16]);
set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_STD_F16]);
set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_AH_F16]);
+ set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_ZA_F16]);
}
if (changed & FPCR_FZ) {
bool ftz_enabled = val & FPCR_FZ;
set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32]);
set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A64]);
+ set_flush_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_ZA]);
/* FIZ is A64 only so FZ always makes A32 code flush inputs to zero */
set_flush_inputs_to_zero(ftz_enabled, &env->vfp.fp_status[FPST_A32]);
}
@@ -221,6 +228,7 @@ void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask)
bool fitz_enabled = (val & FPCR_FIZ) ||
(val & (FPCR_FZ | FPCR_AH)) == FPCR_FZ;
set_flush_inputs_to_zero(fitz_enabled, &env->vfp.fp_status[FPST_A64]);
+ set_flush_inputs_to_zero(fitz_enabled, &env->vfp.fp_status[FPST_ZA]);
}
if (changed & FPCR_DN) {
bool dnan_enabled = val & FPCR_DN;
@@ -238,9 +246,13 @@ void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask)
/* Change behaviours for A64 FP operations */
arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_A64]);
arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]);
+ arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_ZA]);
+ arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_ZA_F16]);
} else {
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64]);
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]);
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_ZA]);
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_ZA_F16]);
}
}
/*