author     Richard Sandiford <richard.sandiford@arm.com>    2024-11-11 12:32:21 +0000
committer  Richard Sandiford <richard.sandiford@arm.com>    2024-11-11 12:32:21 +0000
commit     2cc4c2a281e983d735c4c38091d67508d3b4f892
tree       8a8ea1472be142f47e9f57addd5e0ce998b51cd3
parent     f7ed863632547705a6c791ea6487fcb33ee029a8
aarch64: Add remaining SVE2p1 support
This patch adds the instructions that are new to FEAT_SVE2p1.  It mostly
contains simple additions, so it didn't seem worth splitting up further.

It's likely that we'll find more autovec uses for some of these
instructions, but for now this patch just deals with one obvious case:
using the new hybrid-VLA permutations to handle "stepped" versions of
some Advanced SIMD permutations.  See aarch64_evpc_hvla for details
(a short illustration follows the ChangeLog below).

The patch also continues the existing practice of lowering ACLE
permutation intrinsics to VEC_PERM_EXPR.  That's admittedly a bit
inconsistent with the approach I've been advocating for when it comes
to arithmetic, but I think the difference is that (a) these are pure
data movement, and so there's limited scope for things like gimple
canonicalisations to mess with the instruction selection or operation
mix; and (b) there are no added UB rules to worry about.

Another new thing in the patch is the concept of "memory-only" SVE
vector modes.  These are used to represent the memory operands of the
new LD1[DW] (to .Q), LD[234]Q, ST1[DW] (from .Q), and ST[234]Q
instructions.  We continue to use .B, .H, .S, and .D modes for the
registers, since there's no predicated contiguous LD1Q instruction,
and since there's no arithmetic that can be done on TI.  (The new
instructions are instead intended for hybrid VLA, i.e. for vectors of
vectors.)  A usage sketch follows the diffstat below.

For now, all of the new instructions are non-streaming-only.  Some of
them are streaming-compatible with SME2p1, but that's a later patch.

gcc/
	* config/aarch64/aarch64-modes.def (VNx1SI, VNx1DI): New modes.
	* config/aarch64/aarch64-sve-builtins-base.cc
	(svdup_lane_impl::expand): Update generation of TBL instruction.
	(svtbl_impl): Delete.
	(svtbl): Use unspec_based_uncond_function instead.
	* config/aarch64/aarch64-sve-builtins-functions.h
	(permute::fold_permute): Handle trailing immediate arguments.
	* config/aarch64/aarch64-sve-builtins-shapes.h (extq): Declare.
	(load_gather64_sv_index, load_gather64_sv_offset): Likewise.
	(load_gather64_vs_index, load_gather64_vs_offset): Likewise.
	(pmov_from_vector, pmov_from_vector_lane, pmov_to_vector_lane)
	(reduction_neonq, store_scatter64_index, store_scatter64_offset)
	(unary_lane): Likewise.
	* config/aarch64/aarch64-sve-builtins-shapes.cc
	(load_gather64_sv_base, store_scatter64_base): New classes.
	(extq_def, extq): New shape.
	(load_gather64_sv_index_def, load_gather64_sv_index): Likewise.
	(load_gather64_sv_offset_def, load_gather64_sv_offset): Likewise.
	(load_gather64_vs_index_def, load_gather64_vs_index): Likewise.
	(load_gather64_vs_offset_def, load_gather64_vs_offset): Likewise.
	(pmov_from_vector_def, pmov_from_vector): Likewise.
	(pmov_from_vector_lane_def, pmov_from_vector_lane): Likewise.
	(pmov_to_vector_lane_def, pmov_to_vector_lane): Likewise.
	(reduction_neonq_def, reduction_neonq): Likewise.
	(store_scatter64_index_def, store_scatter64_index): Likewise.
	(store_scatter64_offset_def, store_scatter64_offset): Likewise.
	(unary_lane_def, unary_lane): Likewise.
	* config/aarch64/aarch64-sve-builtins-sve2.h (svaddqv, svandqv)
	(svdup_laneq, sveorqv, svextq, svld1q_gather, svld1udq, svld1uwq)
	(svld2q, svld3q, svld4q, svmaxnmqv, svmaxqv, svminnmqv, svminqv)
	(svorqv, svpmov, svpmov_lane, svst1dq, svst1q_scatter, svst1wq)
	(svst2q, svst3q, svst4q, svtblq, svtbx, svtbxq, svuzpq1, svuzpq2)
	(svzipq1, svzipq2): Declare.
	* config/aarch64/aarch64-sve-builtins-sve2.cc (ld1uxq_st1xq_base)
	(ld234q_st234q_base, svdup_laneq_impl, svextq_impl): New classes.
	(svld1q_gather_impl, svld1uxq_impl, svld234q_impl): Likewise.
	(svpmov_impl, svpmov_lane_impl, svst1q_scatter_impl): Likewise.
	(svst1xq_impl, svst234q_impl, svuzpq_impl, svzipq_impl): Likewise.
	(svaddqv, svandqv, svdup_laneq, sveorqv, svextq, svld1q_gather)
	(svld1udq, svld1uwq, svld2q, svld3q, svld4q, svmaxnmqv, svmaxqv)
	(svminnmqv, svminqv, svorqv, svpmov, svpmov_lane, svst1dq)
	(svst1q_scatter, svst1wq, svst2q, svst3q, svst4q, svtblq, svtbx)
	(svtbxq, svuzpq1, svuzpq2, svzipq1, svzipq2): New function entries.
	* config/aarch64/aarch64-sve-builtins-sve2.def (svaddqv, svandqv)
	(svdup_laneq, sveorqv, svextq, svld2q, svld3q, svld4q, svmaxnmqv)
	(svmaxqv, svminnmqv, svminqv, svorqv, svpmov, svpmov_lane, svst2q)
	(svst3q, svst4q, svtblq, svtbxq, svuzpq1, svuzpq2, svzipq1, svzipq2)
	(svld1q_gather, svld1udq, svld1uwq, svst1dq, svst1q_scatter)
	(svst1wq): New function definitions.
	* config/aarch64/aarch64-sve-builtins.cc (TYPES_hsd_data)
	(hsd_data, s_data): New type lists.
	(function_resolver::infer_pointer_type): Give a specific error
	about passing a pointer to 8-bit elements to an _index function.
	(function_resolver::resolve_sv_displacement): Check whether the
	function allows 32-bit bases.
	* config/aarch64/iterators.md (UNSPEC_TBLQ, UNSPEC_TBXQ): New unspecs.
	(UNSPEC_ADDQV, UNSPEC_ANDQV, UNSPEC_DUPQ, UNSPEC_EORQV, UNSPEC_EXTQ)
	(UNSPEC_FADDQV, UNSPEC_FMAXQV, UNSPEC_FMAXNMQV, UNSPEC_FMINQV)
	(UNSPEC_FMINNMQV, UNSPEC_LD1_EXTENDQ, UNSPEC_LD1Q_GATHER): Likewise.
	(UNSPEC_LDNQ, UNSPEC_ORQV, UNSPEC_PMOV_PACK, UNSPEC_PMOV_PACK_LANE)
	(UNSPEC_PMOV_UNPACK, UNSPEC_PMOV_UNPACK_LANE, UNSPEC_SMAXQV): Likewise.
	(UNSPEC_SMINQV, UNSPEC_ST1_TRUNCQ, UNSPEC_ST1Q_SCATTER, UNSPEC_STNQ)
	(UNSPEC_UMAXQV, UNSPEC_UMINQV, UNSPEC_UZPQ1, UNSPEC_UZPQ2): Likewise.
	(UNSPEC_ZIPQ1, UNSPEC_ZIPQ2): Likewise.
	(Vtype): Handle single-vector SVE modes.
	(Vendreg): Handle SVE structure modes.
	(VNxTI, LD1_EXTENDQ_MEM): New mode attributes.
	(SVE_PERMUTE, SVE_TBL, SVE_TBX): New int iterators.
	(SVE_INT_REDUCTION_128, SVE_FP_REDUCTION_128): Likewise.
	(optab): Handle the new SVE2.1 reductions.
	(perm_insn): Handle the new SVE2.1 permutations.
	* config/aarch64/aarch64-sve.md (@aarch64_sve_tbl<mode>): Generalize
	to...
	(@aarch64_sve_<SVE_TBL:perm_insn><mode>): ...this.
	(@aarch64_sve_<PERMUTE:perm_insn><mode>): Generalize to...
	(@aarch64_sve_<SVE_PERMUTE:perm_insn><mode>): ...this.
	* config/aarch64/aarch64-sve2.md (@aarch64_pmov_to_<mode>)
	(@aarch64_pmov_lane_to_<mode>, @aarch64_pmov_from_<mode>)
	(@aarch64_pmov_lane_from_<mode>, @aarch64_sve_ld1_extendq<mode>)
	(@aarch64_sve_ldnq<mode>, aarch64_gather_ld1q): New patterns.
	(@aarch64_sve_st1_truncq<mode>, @aarch64_sve_stnq<mode>): Likewise.
	(aarch64_scatter_st1q, @aarch64_pred_reduc_<optab>_<mode>): Likewise.
	(@aarch64_sve_dupq<mode>, @aarch64_sve_extq<mode>): Likewise.
	(@aarch64_sve2_tbx<mode>): Generalize to...
	(@aarch64_sve_<SVE_TBX:perm_insn><mode>): ...this.
	* config/aarch64/aarch64.cc (aarch64_classify_vector_memory_mode):
	New function.
	(aarch64_regmode_natural_size): Use it.
	(aarch64_classify_index): Likewise.
	(aarch64_classify_address): Likewise.
	(aarch64_print_address_internal): Likewise.
	(aarch64_evpc_hvla): New function.
	(aarch64_expand_vec_perm_const_1): Use it.
gcc/testsuite/ * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_1.c, * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_1.c, * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_2.c, * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_3.c, * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_4.c, * gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_5.c: Adjust the "did you mean" suggestion. * gcc.target/aarch64/sve/acle/general-c/ld1sh_gather_1.c: Removed. * gcc.target/aarch64/sve/acle/general-c/extq_1.c: New test. * gcc.target/aarch64/sve/acle/general-c/load_gather64_sv_index_1.c: Likewise. * gcc.target/aarch64/sve/acle/general-c/load_gather64_sv_offset_1.c: Likewise. * gcc.target/aarch64/sve/acle/general-c/pmov_from_vector_1.c: Likewise. * gcc.target/aarch64/sve/acle/general-c/pmov_from_vector_lane_1.c: Likewise. * gcc.target/aarch64/sve/acle/general-c/pmov_to_vector_lane_1.c: Likewise. * gcc.target/aarch64/sve/acle/general-c/pmov_to_vector_lane_2.c: Likewise. * gcc.target/aarch64/sve/acle/general-c/store_scatter64_1.c: Likewise. * gcc.target/aarch64/sve/acle/general-c/store_scatter64_index_1.c: Likewise. * gcc.target/aarch64/sve/acle/general-c/store_scatter64_offset_1.c: Likewise. * gcc.target/aarch64/sve/acle/general-c/unary_lane_1.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/addqv_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/addqv_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/addqv_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/addqv_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/addqv_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/addqv_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/addqv_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/addqv_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/addqv_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/addqv_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/addqv_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/andqv_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/andqv_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/andqv_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/andqv_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/andqv_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/andqv_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/andqv_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/andqv_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/dup_laneq_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/dup_laneq_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/dup_laneq_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/dup_laneq_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/dup_laneq_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/dup_laneq_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/dup_laneq_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/dup_laneq_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/dup_laneq_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/dup_laneq_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/dup_laneq_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/dup_laneq_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/eorqv_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/eorqv_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/eorqv_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/eorqv_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/eorqv_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/eorqv_u32.c: Likewise. 
* gcc.target/aarch64/sve2/acle/asm/eorqv_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/eorqv_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/extq_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/extq_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/extq_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/extq_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/extq_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/extq_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/extq_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/extq_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/extq_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/extq_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/extq_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/extq_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1q_gather_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1udq_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1udq_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1udq_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1uwq_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1uwq_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld1uwq_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld2q_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld2q_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld2q_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld2q_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld2q_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld2q_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld2q_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld2q_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld2q_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld2q_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld2q_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld2q_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld3q_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld3q_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld3q_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld3q_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld3q_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld3q_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld3q_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld3q_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld3q_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld3q_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld3q_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld3q_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld4q_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld4q_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld4q_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld4q_f64.c: Likewise. 
* gcc.target/aarch64/sve2/acle/asm/ld4q_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld4q_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld4q_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld4q_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld4q_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld4q_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld4q_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/ld4q_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxnmqv_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxnmqv_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxnmqv_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxqv_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxqv_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxqv_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxqv_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxqv_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxqv_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxqv_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxqv_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxqv_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxqv_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/maxqv_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minnmqv_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minnmqv_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minnmqv_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minqv_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minqv_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minqv_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minqv_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minqv_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minqv_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minqv_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minqv_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minqv_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minqv_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/minqv_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/orqv_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/orqv_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/orqv_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/orqv_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/orqv_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/orqv_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/orqv_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/orqv_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/pmov_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/pmov_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/pmov_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/pmov_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/pmov_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/pmov_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/pmov_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/pmov_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1dq_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1dq_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1dq_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1q_scatter_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s16.c: Likewise. 
* gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1wq_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1wq_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st1wq_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st2q_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st2q_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st2q_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st2q_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st2q_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st2q_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st2q_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st2q_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st2q_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st2q_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st2q_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st2q_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st3q_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st3q_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st3q_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st3q_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st3q_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st3q_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st3q_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st3q_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st3q_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st3q_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st3q_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st3q_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st4q_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st4q_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st4q_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st4q_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st4q_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st4q_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st4q_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st4q_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st4q_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st4q_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st4q_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/st4q_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tblq_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tblq_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tblq_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tblq_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tblq_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tblq_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tblq_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tblq_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tblq_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tblq_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tblq_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tblq_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tbxq_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tbxq_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tbxq_f32.c: Likewise. 
* gcc.target/aarch64/sve2/acle/asm/tbxq_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tbxq_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tbxq_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tbxq_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tbxq_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tbxq_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tbxq_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tbxq_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/tbxq_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq1_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq1_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq1_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq1_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq1_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq1_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq1_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq1_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq1_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq1_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq1_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq1_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq2_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq2_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq2_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq2_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq2_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq2_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq2_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq2_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq2_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq2_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq2_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/uzpq2_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq1_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq1_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq1_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq1_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq1_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq1_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq1_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq1_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq1_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq1_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq1_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq1_u8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq2_bf16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq2_f16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq2_f32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq2_f64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq2_s16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq2_s32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq2_s64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq2_s8.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq2_u16.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq2_u32.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq2_u64.c: Likewise. * gcc.target/aarch64/sve2/acle/asm/zipq2_u8.c: Likewise. * gcc.target/aarch64/sve2/dupq_1.c: Likewise. * gcc.target/aarch64/sve2/extq_1.c: Likewise. * gcc.target/aarch64/sve2/uzpq_1.c: Likewise. * gcc.target/aarch64/sve2/zipq_1.c: Likewise.
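
Illustration of the two user-visible additions described above: the new
quadword permutation intrinsics and the "stepped" permutations that
aarch64_evpc_hvla recognises.  This is a hedged sketch, not part of the
patch; the intrinsic names come from the ChangeLog above, while the
-march spelling and the example selector values are assumptions made
for illustration only.

    #include <arm_sve.h>

    /* Compile with something like -march=armv9-a+sve2p1 (option spelling
       assumed).  The new intrinsics are non-streaming-only for now.  */

    /* ZIPQ1: interleave the low halves of the corresponding 128-bit
       quadwords of A and B, independently in every quadword of the
       (variable-length) vectors.  */
    svfloat32_t
    interleave_quads (svfloat32_t a, svfloat32_t b)
    {
      return svzipq1 (a, b);
    }

    /* EXTQ: roughly speaking, each result quadword takes the elements of
       the corresponding quadword of A starting at element 2, filled up
       from the corresponding quadword of B.  extq_def requires the
       immediate, scaled by the element size in bytes, to be in [0, 15].  */
    svfloat32_t
    extract_quads (svfloat32_t a, svfloat32_t b)
    {
      return svextq (a, b, 2);
    }

On the autovec side, aarch64_evpc_hvla looks for VEC_PERM_EXPRs whose
selector repeats the same 128-bit-wide pattern in every quadword.  For
example (assumed illustration of the "stepped" shape), with 32-bit
elements a selector of the form { 1, 1, 1, 1, 5, 5, 5, 5, 9, 9, 9, 9, ... }
broadcasts lane 1 of each quadword and can now be emitted as a single
DUPQ rather than a TBL with a constant-pool index vector.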
Diffstat (limited to 'gcc')
-rw-r--r--gcc/config/aarch64/aarch64-modes.def17
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-base.cc16
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-functions.h2
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-shapes.cc287
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-shapes.h12
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-sve2.cc317
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-sve2.def42
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins-sve2.h30
-rw-r--r--gcc/config/aarch64/aarch64-sve-builtins.cc19
-rw-r--r--gcc/config/aarch64/aarch64-sve.md13
-rw-r--r--gcc/config/aarch64/aarch64-sve2.md314
-rw-r--r--gcc/config/aarch64/aarch64.cc147
-rw-r--r--gcc/config/aarch64/iterators.md121
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/extq_1.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ld1sh_gather_1.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_1.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_2.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_3.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_4.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_5.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_gather64_sv_index_1.c57
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_gather64_sv_offset_1.c54
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_from_vector_1.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_from_vector_lane_1.c41
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_to_vector_lane_1.c45
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_to_vector_lane_2.c19
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter64_1.c32
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter64_index_1.c59
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter64_offset_1.c58
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_lane_1.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_f16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_f32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_f64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s8.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u8.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s8.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u8.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_bf16.c53
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_f16.c53
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_f32.c53
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_f64.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s16.c53
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s32.c53
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s64.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s8.c53
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u16.c53
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u32.c53
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u64.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u8.c53
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s8.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u8.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_bf16.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_f16.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_f32.c67
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_f64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s16.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s32.c67
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s8.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u16.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u32.c67
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u8.c77
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_bf16.c179
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f16.c179
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f32.c179
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f64.c179
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s16.c179
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s32.c179
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s64.c179
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s8.c109
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u16.c179
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u32.c179
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u64.c179
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u8.c109
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1udq_f64.c163
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1udq_s64.c163
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1udq_u64.c163
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1uwq_f32.c163
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1uwq_s32.c163
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1uwq_u32.c163
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_bf16.c234
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_f16.c234
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_f32.c224
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_f64.c214
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s16.c234
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s32.c224
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s64.c214
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s8.c244
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u16.c234
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u32.c224
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u64.c214
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u8.c244
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_bf16.c281
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_f16.c281
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_f32.c271
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_f64.c261
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s16.c281
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s32.c271
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s64.c261
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s8.c291
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u16.c281
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u32.c271
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u64.c261
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u8.c291
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_bf16.c325
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_f16.c325
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_f32.c315
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_f64.c305
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s16.c325
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s32.c315
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s64.c305
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s8.c335
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u16.c325
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u32.c315
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u64.c305
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u8.c335
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxnmqv_f16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxnmqv_f32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxnmqv_f64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_f16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_f32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_f64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s8.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u8.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minnmqv_f16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minnmqv_f32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minnmqv_f64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_f16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_f32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_f64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s8.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u8.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s8.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u16.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u32.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u64.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u8.c26
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s16.c68
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s32.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s64.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s8.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u16.c68
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u32.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u64.c104
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u8.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1dq_f64.c163
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1dq_s64.c163
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1dq_u64.c163
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_bf16.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f16.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f32.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f64.c152
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s16.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s32.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s64.c152
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s8.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u16.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u32.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u64.c152
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u8.c93
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1wq_f32.c163
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1wq_s32.c163
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1wq_u32.c163
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_bf16.c239
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_f16.c239
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_f32.c229
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_f64.c219
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s16.c239
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s32.c229
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s64.c219
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s8.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u16.c239
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u32.c229
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u64.c219
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u8.c249
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_bf16.c281
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_f16.c281
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_f32.c271
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_f64.c261
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s16.c281
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s32.c271
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s64.c261
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s8.c291
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u16.c281
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u32.c271
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u64.c261
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u8.c291
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_bf16.c325
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_f16.c325
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_f32.c315
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_f64.c305
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s16.c325
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s32.c315
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s64.c305
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s8.c335
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u16.c325
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u32.c315
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u64.c305
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u8.c335
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_bf16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_f16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_f32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_f64.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s64.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s8.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u64.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u8.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_bf16.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_f16.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_f32.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_f64.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s16.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s32.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s64.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s8.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u16.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u32.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u64.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u8.c42
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_bf16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_f16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_f32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_f64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s8.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u8.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_bf16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_f16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_f32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_f64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s8.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u8.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_bf16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_f16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_f32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_f64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s8.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u8.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_bf16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_f16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_f32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_f64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s8.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u16.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u32.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u64.c47
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u8.c35
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/dupq_1.c162
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/extq_1.c128
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/uzpq_1.c111
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve2/zipq_1.c111
310 files changed, 33776 insertions, 81 deletions
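
The first hunk below adds the VNx1SI and VNx1DI modes that back the
"memory-only" concept described in the commit message: they describe
only the memory operands of the new contiguous quadword loads and
stores, while the register side keeps the usual .S/.D modes.  A hedged
usage sketch follows (intrinsic name from the ChangeLog; the exact
prototype and semantics summary are stated as assumptions, not taken
from this patch):

    #include <arm_sve.h>

    /* svld1uwq: the contiguous LD1W "to .Q" form.  Roughly speaking, each
       active 128-bit quadword of the result receives one 32-bit element
       loaded from memory, with the rest of the quadword zeroed.  The
       register is still viewed as .S, but the memory operand is described
       by the new memory-only VNx1SI mode.  */
    svfloat32_t
    load_one_word_per_quad (svbool_t pg, const float32_t *base)
    {
      return svld1uwq_f32 (pg, base);
    }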
diff --git a/gcc/config/aarch64/aarch64-modes.def b/gcc/config/aarch64/aarch64-modes.def
index 813421e..c401d67 100644
--- a/gcc/config/aarch64/aarch64-modes.def
+++ b/gcc/config/aarch64/aarch64-modes.def
@@ -194,7 +194,7 @@ ADV_SIMD_Q_REG_STRUCT_MODES (4, V4x16, V4x8, V4x4, V4x2)
stored in each 128-bit unit. The actual size of the mode depends
on command-line flags.
- VNx1TI isn't really a native SVE mode, but it can be useful in some
+ VNx1* aren't really native SVE modes, but they can be useful in some
limited situations. */
VECTOR_MODE_WITH_PREFIX (VNx, INT, TI, 1, 1);
SVE_MODES (1, VNx16, VNx8, VNx4, VNx2, VNx1)
@@ -204,9 +204,10 @@ SVE_MODES (4, VNx64, VNx32, VNx16, VNx8, VNx4)
/* Partial SVE vectors:
- VNx2QI VNx4QI VNx8QI
- VNx2HI VNx4HI
- VNx2SI
+ VNx2QI VNx4QI VNx8QI
+ VNx2HI VNx4HI
+ VNx1SI VNx2SI
+ VNx1DI
In memory they occupy contiguous locations, in the same way as fixed-length
vectors. E.g. VNx8QImode is half the size of VNx16QImode.
@@ -214,12 +215,17 @@ SVE_MODES (4, VNx64, VNx32, VNx16, VNx8, VNx4)
Passing 2 as the final argument ensures that the modes come after all
other single-vector modes in the GET_MODE_WIDER chain, so that we never
pick them in preference to a full vector mode. */
+VECTOR_MODE_WITH_PREFIX (VNx, INT, SI, 1, 2);
+VECTOR_MODE_WITH_PREFIX (VNx, INT, DI, 1, 2);
VECTOR_MODES_WITH_PREFIX (VNx, INT, 2, 2);
VECTOR_MODES_WITH_PREFIX (VNx, INT, 4, 2);
VECTOR_MODES_WITH_PREFIX (VNx, INT, 8, 2);
VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 4, 2);
VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 8, 2);
+ADJUST_NUNITS (VNx1SI, exact_div (aarch64_sve_vg, 2));
+ADJUST_NUNITS (VNx1DI, exact_div (aarch64_sve_vg, 2));
+
ADJUST_NUNITS (VNx2QI, aarch64_sve_vg);
ADJUST_NUNITS (VNx2HI, aarch64_sve_vg);
ADJUST_NUNITS (VNx2SI, aarch64_sve_vg);
@@ -245,9 +251,12 @@ ADJUST_ALIGNMENT (VNx2BF, 2);
ADJUST_ALIGNMENT (VNx4HF, 2);
ADJUST_ALIGNMENT (VNx4BF, 2);
+ADJUST_ALIGNMENT (VNx1SI, 4);
ADJUST_ALIGNMENT (VNx2SI, 4);
ADJUST_ALIGNMENT (VNx2SF, 4);
+ADJUST_ALIGNMENT (VNx1DI, 8);
+
/* Quad float: 128-bit floating mode for long doubles. */
FLOAT_MODE (TF, 16, ieee_quad_format);
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
index 1c9f515..2117eceb 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
@@ -956,7 +956,8 @@ public:
return e.use_exact_insn (code_for_aarch64_sve_dup_lane (mode));
/* Treat svdup_lane as if it were svtbl_n. */
- return e.use_exact_insn (code_for_aarch64_sve_tbl (e.vector_mode (0)));
+ return e.use_exact_insn (code_for_aarch64_sve (UNSPEC_TBL,
+ e.vector_mode (0)));
}
};
@@ -2897,16 +2898,6 @@ public:
}
};
-class svtbl_impl : public permute
-{
-public:
- rtx
- expand (function_expander &e) const override
- {
- return e.use_exact_insn (code_for_aarch64_sve_tbl (e.vector_mode (0)));
- }
-};
-
/* Implements svtrn1 and svtrn2. */
class svtrn_impl : public binary_permute
{
@@ -3432,7 +3423,8 @@ FUNCTION (svsub, svsub_impl,)
FUNCTION (svsubr, rtx_code_function_rotated, (MINUS, MINUS, UNSPEC_COND_FSUB))
FUNCTION (svsudot, svusdot_impl, (true))
FUNCTION (svsudot_lane, svdotprod_lane_impl, (UNSPEC_SUDOT, -1, -1))
-FUNCTION (svtbl, svtbl_impl,)
+FUNCTION (svtbl, quiet<unspec_based_uncond_function>, (UNSPEC_TBL, UNSPEC_TBL,
+ UNSPEC_TBL))
FUNCTION (svtmad, CODE_FOR_MODE0 (aarch64_sve_tmad),)
FUNCTION (svtrn1, svtrn_impl, (0))
FUNCTION (svtrn1q, unspec_based_function, (UNSPEC_TRN1Q, UNSPEC_TRN1Q,
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-functions.h b/gcc/config/aarch64/aarch64-sve-builtins-functions.h
index 7d06a57..08443eb 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-functions.h
+++ b/gcc/config/aarch64/aarch64-sve-builtins-functions.h
@@ -600,7 +600,7 @@ public:
tree perm_type = build_vector_type (ssizetype, nelts);
return gimple_build_assign (f.lhs, VEC_PERM_EXPR,
gimple_call_arg (f.call, 0),
- gimple_call_arg (f.call, nargs - 1),
+ gimple_call_arg (f.call, nargs == 1 ? 0 : 1),
vec_perm_indices_to_tree (perm_type, indices));
}
};
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
index 62277af..1088fba 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-shapes.cc
@@ -735,7 +735,7 @@ struct binary_za_slice_opt_single_base : public overloaded_base<1>
}
};
-/* Base class for ext. */
+/* Base class for ext and extq. */
struct ext_base : public overloaded_base<0>
{
void
@@ -850,6 +850,22 @@ struct load_gather_sv_base : public overloaded_base<0>
}
};
+/* Base class for load_gather64_sv_index and load_gather64_sv_offset. */
+struct load_gather64_sv_base : public load_gather_sv_base
+{
+ type_suffix_index
+ vector_base_type (type_suffix_index) const override
+ {
+ return TYPE_SUFFIX_u64;
+ }
+
+ function_resolver::target_type_restrictions
+ get_target_type_restrictions (const function_instance &) const override
+ {
+ return function_resolver::TARGET_ANY;
+ }
+};
+
/* Base class for load_ext_gather_index and load_ext_gather_offset,
which differ only in the units of the displacement. */
struct load_ext_gather_base : public overloaded_base<1>
@@ -1033,6 +1049,22 @@ struct store_scatter_base : public overloaded_base<0>
}
};
+/* Base class for store_scatter64_index and store_scatter64_offset. */
+struct store_scatter64_base : public store_scatter_base
+{
+ type_suffix_index
+ vector_base_type (type_suffix_index) const override
+ {
+ return TYPE_SUFFIX_u64;
+ }
+
+ type_suffix_index
+ infer_vector_type (function_resolver &r, unsigned int argno) const override
+ {
+ return r.infer_vector_type (argno);
+ }
+};
+
/* Base class for ternary operations in which the final argument is an
immediate shift amount. The derived class should check the range. */
struct ternary_shift_imm_base : public overloaded_base<0>
@@ -2441,6 +2473,21 @@ struct ext_def : public ext_base
};
SHAPE (ext)
+/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)
+
+ where the final argument is an integer constant expression that when
+ multiplied by the number of bytes in t0 is in the range [0, 15]. */
+struct extq_def : public ext_base
+{
+ bool
+ check (function_checker &c) const override
+ {
+ unsigned int bytes = c.type_suffix (0).element_bytes;
+ return c.require_immediate_range (2, 0, 16 / bytes - 1);
+ }
+};
+SHAPE (extq)
+
/* svboolx<g>_t svfoo_t0_g(sv<t0>_t, sv<t0>_t, uint32_t). */
struct extract_pred_def : public nonoverloaded_base
{
@@ -2992,6 +3039,75 @@ struct load_gather_vs_def : public overloaded_base<1>
};
SHAPE (load_gather_vs)
+/* sv<t0>_t svfoo_[s64]index[_t0](const <t0>_t *, svint64_t)
+ sv<t0>_t svfoo_[u64]index[_t0](const <t0>_t *, svuint64_t). */
+struct load_gather64_sv_index_def : public load_gather64_sv_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_index);
+ build_all (b, "t0,al,d", group, MODE_s64index);
+ build_all (b, "t0,al,d", group, MODE_u64index);
+ }
+};
+SHAPE (load_gather64_sv_index)
+
+/* sv<t0>_t svfoo_[s64]offset[_t0](const <t0>_t *, svint64_t)
+ sv<t0>_t svfoo_[u64]offset[_t0](const <t0>_t *, svuint64_t). */
+struct load_gather64_sv_offset_def : public load_gather64_sv_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_offset);
+ build_all (b, "t0,al,d", group, MODE_s64offset);
+ build_all (b, "t0,al,d", group, MODE_u64offset);
+ }
+};
+SHAPE (load_gather64_sv_offset)
+
+/* sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t). */
+struct load_gather64_vs_index_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "t0,b,ss64", group, MODE_u64base_index, true);
+ }
+
+ tree
+ resolve (function_resolver &) const override
+ {
+ /* The short name just makes the base vector mode implicit;
+ no resolution is needed. */
+ gcc_unreachable ();
+ }
+};
+SHAPE (load_gather64_vs_index)
+
+/* sv<t0>_t svfoo[_u64base]_t0(svuint64_t)
+
+ sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t). */
+struct load_gather64_vs_offset_def : public nonoverloaded_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ build_all (b, "t0,b", group, MODE_u64base, true);
+ build_all (b, "t0,b,ss64", group, MODE_u64base_offset, true);
+ }
+
+ tree
+ resolve (function_resolver &) const override
+ {
+ /* The short name just makes the base vector mode implicit;
+ no resolution is needed. */
+ gcc_unreachable ();
+ }
+};
+SHAPE (load_gather64_vs_offset)
+
/* sv<t0>_t svfoo[_t0](const <t0>_t *)
The only difference from "load" is that this shape has no vnum form. */
@@ -3044,6 +3160,92 @@ struct pattern_pred_def : public nonoverloaded_base
};
SHAPE (pattern_pred)
+/* svbool_t svfoo[_t0](sv<t0>_t). */
+struct pmov_from_vector_def : public overloaded_base<0>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "vp,v0", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ return r.resolve_uniform (1);
+ }
+};
+SHAPE (pmov_from_vector)
+
+/* svbool_t svfoo[_t0](sv<t0>_t, uint64_t)
+
+ where the final argument is an integer constant expression in the
+ range [0, sizeof (<t0>_t) - 1]. */
+struct pmov_from_vector_lane_def : public overloaded_base<0>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "vp,v0,su64", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ return r.resolve_uniform (1, 1);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ unsigned int bytes = c.type_suffix (0).element_bytes;
+ return c.require_immediate_range (1, 0, bytes - 1);
+ }
+};
+SHAPE (pmov_from_vector_lane)
+
+/* sv<t0>_t svfoo_t0(uint64_t)
+
+ where the final argument is an integer constant expression in the
+ range [1, sizeof (<t0>_t) - 1]. */
+struct pmov_to_vector_lane_def : public overloaded_base<0>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "v0,su64", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ type_suffix_index type;
+ gcc_assert (r.pred == PRED_m);
+ if (!r.check_num_arguments (3)
+ || (type = r.infer_vector_type (0)) == NUM_TYPE_SUFFIXES
+ || !r.require_vector_type (1, VECTOR_TYPE_svbool_t)
+ || !r.require_integer_immediate (2))
+ return error_mark_node;
+
+ return r.resolve_to (r.mode_suffix_id, type);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ unsigned int bytes = c.type_suffix (0).element_bytes;
+ /* 1 to account for the vector argument.
+
+ ??? This should probably be folded into function_checker::m_base_arg,
+ but it doesn't currently have the necessary information. */
+ return c.require_immediate_range (1, 1, bytes - 1);
+ }
+};
+SHAPE (pmov_to_vector_lane)
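
To make the asymmetric ranges concrete: the from-vector form accepts lane 0, while the to-vector form starts at lane 1 because lane 0 is already covered by the plain svpmov intrinsic. A hedged sketch; the intrinsic spellings below are assumptions based on the shapes above, not something this patch shows:

    #include <arm_sve.h>

    /* Assumed ACLE spellings for the PMOV intrinsics.  */
    svbool_t from_vector (svuint64_t zn)
    {
      return svpmov_lane_u64 (zn, 0);        /* from-vector lane: [0, 7].  */
    }

    svuint64_t to_vector (svuint64_t zd, svbool_t pn)
    {
      return svpmov_lane_u64_m (zd, pn, 7);  /* to-vector lane: [1, 7].  */
    }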
+
/* void svfoo(const void *, svprfop)
void svfoo_vnum(const void *, int64_t, svprfop). */
struct prefetch_def : public nonoverloaded_base
@@ -3215,6 +3417,24 @@ struct reduction_def : public overloaded_base<0>
};
SHAPE (reduction)
+/* <t0>xN_t svfoo[_t0](sv<t0>_t). */
+struct reduction_neonq_def : public overloaded_base<0>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "Q0,v0", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ return r.resolve_uniform (1);
+ }
+};
+SHAPE (reduction_neonq)
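
To make the <t0>xN_t return type concrete: these reductions produce a 128-bit Advanced SIMD vector in which lane i combines lane i of every 128-bit segment of the SVE input. A sketch, assuming the ACLE spelling svaddqv:

    #include <arm_neon.h>
    #include <arm_sve.h>

    /* Assumed ACLE spelling: result lane i is the sum of lane i across
       all 128-bit segments of X that PG selects.  */
    int32x4_t segment_sums (svbool_t pg, svint32_t x)
    {
      return svaddqv (pg, x);
    }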
+
/* int64_t svfoo[_t0](sv<t0>_t) (for signed t0)
uint64_t svfoo[_t0](sv<t0>_t) (for unsigned t0)
<t0>_t svfoo[_t0](sv<t0>_t) (for floating-point t0)
@@ -3612,6 +3832,44 @@ struct store_scatter_offset_restricted_def : public store_scatter_base
};
SHAPE (store_scatter_offset_restricted)
+/* void svfoo_[s64]index[_t0](<t0>_t *, svint64_t, sv<t0>_t)
+ void svfoo_[u64]index[_t0](<t0>_t *, svuint64_t, sv<t0>_t)
+
+ void svfoo[_u64base]_index[_t0](svuint64_t, int64_t, sv<t0>_t). */
+struct store_scatter64_index_def : public store_scatter64_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_index);
+ build_all (b, "_,as,d,t0", group, MODE_s64index);
+ build_all (b, "_,as,d,t0", group, MODE_u64index);
+ build_all (b, "_,b,ss64,t0", group, MODE_u64base_index);
+ }
+};
+SHAPE (store_scatter64_index)
+
+/* void svfoo_[s64]offset[_t0](<t0>_t *, svint64_t, sv<t0>_t)
+ void svfoo_[u64]offset[_t0](<t0>_t *, svuint64_t, sv<t0>_t)
+
+ void svfoo[_u64base_t0](svuint64_t, sv<t0>_t)
+
+ void svfoo[_u64base]_offset[_t0](svuint64_t, int64_t, sv<t0>_t). */
+struct store_scatter64_offset_def : public store_scatter64_base
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ b.add_overloaded_functions (group, MODE_offset);
+ build_all (b, "_,as,d,t0", group, MODE_s64offset);
+ build_all (b, "_,as,d,t0", group, MODE_u64offset);
+ build_all (b, "_,b,t0", group, MODE_u64base);
+ build_all (b, "_,b,ss64,t0", group, MODE_u64base_offset);
+ }
+};
+SHAPE (store_scatter64_offset)
+
/* void svfoo_t0(uint64_t, uint32_t, svbool_t, void *)
void svfoo_vnum_t0(uint64_t, uint32_t, svbool_t, void *, int64_t)
@@ -4365,6 +4623,33 @@ struct unary_convertxn_def : public unary_convert_def
};
SHAPE (unary_convertxn)
+/* sv<t0>_t svfoo[_t0](sv<t0>_t, uint64_t)
+
+ where the final argument is an integer constant expression in the
+ range [0, 16 / sizeof (<t0>_t) - 1]. */
+struct unary_lane_def : public overloaded_base<0>
+{
+ void
+ build (function_builder &b, const function_group_info &group) const override
+ {
+ b.add_overloaded_functions (group, MODE_none);
+ build_all (b, "v0,v0,su64", group, MODE_none);
+ }
+
+ tree
+ resolve (function_resolver &r) const override
+ {
+ return r.resolve_uniform (1, 1);
+ }
+
+ bool
+ check (function_checker &c) const override
+ {
+ return c.require_immediate_lane_index (1, 0);
+ }
+};
+SHAPE (unary_lane)
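
For example, with 32-bit elements the index ranges over [0, 3] and names a lane within each 128-bit segment. A sketch, assuming the ACLE spelling svdup_laneq:

    #include <arm_sve.h>

    /* Assumed ACLE spelling: broadcast lane 3 of each 128-bit segment
       across that segment.  */
    svint32_t broadcast_per_segment (svint32_t x)
    {
      return svdup_laneq (x, 3);   /* 4-byte elements: index range [0, 3].  */
    }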
+
/* sv<t0>_t svfoo[_t0](sv<t0:half>_t). */
struct unary_long_def : public overloaded_base<0>
{
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-shapes.h b/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
index ea87240..12ef2c9 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
+++ b/gcc/config/aarch64/aarch64-sve-builtins-shapes.h
@@ -128,6 +128,7 @@ namespace aarch64_sve
extern const function_shape *const dupq;
extern const function_shape *const dup_neonq;
extern const function_shape *const ext;
+ extern const function_shape *const extq;
extern const function_shape *const extract_pred;
extern const function_shape *const fold_left;
extern const function_shape *const get;
@@ -152,12 +153,19 @@ namespace aarch64_sve
extern const function_shape *const load_gather_sv;
extern const function_shape *const load_gather_sv_restricted;
extern const function_shape *const load_gather_vs;
+ extern const function_shape *const load_gather64_sv_index;
+ extern const function_shape *const load_gather64_sv_offset;
+ extern const function_shape *const load_gather64_vs_index;
+ extern const function_shape *const load_gather64_vs_offset;
extern const function_shape *const load_replicate;
extern const function_shape *const load_za;
extern const function_shape *const luti2_lane_zt;
extern const function_shape *const luti4_lane_zt;
extern const function_shape *const mmla;
extern const function_shape *const pattern_pred;
+ extern const function_shape *const pmov_from_vector;
+ extern const function_shape *const pmov_from_vector_lane;
+ extern const function_shape *const pmov_to_vector_lane;
extern const function_shape *const prefetch;
extern const function_shape *const prefetch_gather_index;
extern const function_shape *const prefetch_gather_offset;
@@ -167,6 +175,7 @@ namespace aarch64_sve
extern const function_shape *const read_za_m;
extern const function_shape *const read_za_slice;
extern const function_shape *const reduction;
+ extern const function_shape *const reduction_neonq;
extern const function_shape *const reduction_wide;
extern const function_shape *const reinterpret;
extern const function_shape *const select_pred;
@@ -186,6 +195,8 @@ namespace aarch64_sve
extern const function_shape *const store_scatter_index_restricted;
extern const function_shape *const store_scatter_offset;
extern const function_shape *const store_scatter_offset_restricted;
+ extern const function_shape *const store_scatter64_index;
+ extern const function_shape *const store_scatter64_offset;
extern const function_shape *const store_za;
extern const function_shape *const storexn;
extern const function_shape *const str_za;
@@ -218,6 +229,7 @@ namespace aarch64_sve
extern const function_shape *const unary_convert;
extern const function_shape *const unary_convert_narrowt;
extern const function_shape *const unary_convertxn;
+ extern const function_shape *const unary_lane;
extern const function_shape *const unary_long;
extern const function_shape *const unary_n;
extern const function_shape *const unary_narrowb;
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc b/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc
index 24e95af..fd0c98c 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-sve2.cc
@@ -78,6 +78,44 @@ unspec_sqrdcmlah (int rot)
}
}
+class ld1uxq_st1xq_base : public function_base
+{
+public:
+ CONSTEXPR ld1uxq_st1xq_base (machine_mode memory_mode)
+ : m_memory_mode (memory_mode) {}
+
+ tree
+ memory_scalar_type (const function_instance &fi) const override
+ {
+ return fi.scalar_type (0);
+ }
+
+ machine_mode
+ memory_vector_mode (const function_instance &) const override
+ {
+ return m_memory_mode;
+ }
+
+protected:
+ machine_mode m_memory_mode;
+};
+
+class ld234q_st234q_base : public full_width_access
+{
+public:
+ CONSTEXPR ld234q_st234q_base (unsigned int vector_count, machine_mode mode)
+ : full_width_access (vector_count), m_mode (mode)
+ {}
+
+ machine_mode
+ memory_vector_mode (const function_instance &) const override
+ {
+ return m_mode;
+ }
+
+ machine_mode m_mode;
+};
+
class svaba_impl : public function_base
{
public:
@@ -183,6 +221,100 @@ public:
}
};
+class svdup_laneq_impl : public function_base
+{
+public:
+ rtx
+ expand (function_expander &e) const override
+ {
+ return e.use_exact_insn (code_for_aarch64_sve_dupq (e.result_mode ()));
+ }
+};
+
+class svextq_impl : public permute
+{
+public:
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ unsigned int index = tree_to_uhwi (gimple_call_arg (f.call, 2));
+ machine_mode mode = f.vector_mode (0);
+ unsigned int subelts = 128U / GET_MODE_UNIT_BITSIZE (mode);
+ poly_uint64 nelts = GET_MODE_NUNITS (mode);
+ vec_perm_builder builder (nelts, subelts, 3);
+ for (unsigned int i = 0; i < 3; ++i)
+ for (unsigned int j = 0; j < subelts; ++j)
+ {
+ if (index + j < subelts)
+ builder.quick_push (i * subelts + index + j);
+ else
+ builder.quick_push (i * subelts + index + j - subelts + nelts);
+ }
+ return fold_permute (f, builder);
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ return e.use_exact_insn (code_for_aarch64_sve_extq (e.vector_mode (0)));
+ }
+};
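
A standalone sketch of the selector this fold builds, evaluated with a fixed element count rather than a poly_uint64: for 32-bit elements (subelts = 4), a hypothetical 256-bit vector (8 elements) and index = 1, each 128-bit block takes its last three elements from the first operand and one element from the second:

    #include <cstdio>

    int main ()
    {
      const unsigned int subelts = 4;   /* 128 / 32.  */
      const unsigned int nelts = 8;     /* pretend VL is 256 bits.  */
      const unsigned int index = 1;
      /* Mirrors svextq_impl::fold, but enumerating concrete blocks.  */
      for (unsigned int i = 0; i < nelts / subelts; ++i)
        for (unsigned int j = 0; j < subelts; ++j)
          {
            unsigned int sel = (index + j < subelts
                                ? i * subelts + index + j
                                : i * subelts + index + j - subelts + nelts);
            printf ("%u ", sel);        /* prints: 1 2 3 8 5 6 7 12  */
          }
      printf ("\n");
      return 0;
    }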
+
+class svld1q_gather_impl : public full_width_access
+{
+public:
+ unsigned int
+ call_properties (const function_instance &) const override
+ {
+ return CP_READ_MEMORY;
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ e.prepare_gather_address_operands (1, false);
+ return e.use_exact_insn (CODE_FOR_aarch64_gather_ld1q);
+ }
+};
+
+class svld1uxq_impl : public ld1uxq_st1xq_base
+{
+public:
+ using ld1uxq_st1xq_base::ld1uxq_st1xq_base;
+
+ unsigned int
+ call_properties (const function_instance &) const override
+ {
+ return CP_READ_MEMORY;
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ insn_code icode = code_for_aarch64_sve_ld1_extendq (e.vector_mode (0));
+ return e.use_contiguous_load_insn (icode);
+ }
+};
+
+class svld234q_impl : public ld234q_st234q_base
+{
+public:
+ using ld234q_st234q_base::ld234q_st234q_base;
+
+ unsigned int
+ call_properties (const function_instance &) const override
+ {
+ return CP_READ_MEMORY;
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ insn_code icode = code_for_aarch64_sve_ldnq (e.result_mode ());
+ return e.use_contiguous_load_insn (icode);
+ }
+};
+
class svldnt1_gather_impl : public full_width_access
{
public:
@@ -268,6 +400,38 @@ public:
}
};
+class svpmov_impl : public function_base
+{
+public:
+ rtx
+ expand (function_expander &e) const override
+ {
+ insn_code icode;
+ if (e.pred == PRED_z)
+ icode = code_for_aarch64_pmov_to (e.vector_mode (0));
+ else
+ icode = code_for_aarch64_pmov_from (e.vector_mode (0));
+ return e.use_exact_insn (icode);
+ }
+};
+
+class svpmov_lane_impl : public function_base
+{
+public:
+ rtx
+ expand (function_expander &e) const override
+ {
+ insn_code icode;
+ if (e.pred == PRED_m)
+ icode = code_for_aarch64_pmov_lane_to (e.vector_mode (0));
+ else if (e.args[1] == const0_rtx)
+ icode = code_for_aarch64_pmov_from (e.vector_mode (0));
+ else
+ icode = code_for_aarch64_pmov_lane_from (e.vector_mode (0));
+ return e.use_exact_insn (icode);
+ }
+};
+
class svpsel_lane_impl : public function_base
{
public:
@@ -479,7 +643,7 @@ public:
gimple_call_set_arg (call, 2, imm3_prec);
return call;
}
-public:
+
rtx
expand (function_expander &e) const override
{
@@ -489,6 +653,64 @@ public:
}
};
+class svst1q_scatter_impl : public full_width_access
+{
+public:
+ unsigned int
+ call_properties (const function_instance &) const override
+ {
+ return CP_WRITE_MEMORY;
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ rtx data = e.args.last ();
+ e.args.last () = force_lowpart_subreg (VNx2DImode, data, GET_MODE (data));
+ e.prepare_gather_address_operands (1, false);
+ return e.use_exact_insn (CODE_FOR_aarch64_scatter_st1q);
+ }
+};
+
+class svst1xq_impl : public ld1uxq_st1xq_base
+{
+public:
+ using ld1uxq_st1xq_base::ld1uxq_st1xq_base;
+
+ unsigned int
+ call_properties (const function_instance &) const override
+ {
+ return CP_WRITE_MEMORY;
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ insn_code icode = code_for_aarch64_sve_st1_truncq (e.vector_mode (0));
+ return e.use_contiguous_store_insn (icode);
+ }
+};
+
+class svst234q_impl : public ld234q_st234q_base
+{
+public:
+ using ld234q_st234q_base::ld234q_st234q_base;
+
+ unsigned int
+ call_properties (const function_instance &) const override
+ {
+ return CP_WRITE_MEMORY;
+ }
+
+ rtx
+ expand (function_expander &e) const override
+ {
+ machine_mode tuple_mode = GET_MODE (e.args.last ());
+ insn_code icode = code_for_aarch64_sve_stnq (tuple_mode);
+ return e.use_contiguous_store_insn (icode);
+ }
+};
+
class svstnt1_scatter_impl : public full_width_access
{
public:
@@ -562,6 +784,34 @@ public:
}
};
+/* Implements svuzpq1 and svuzpq2. */
+class svuzpq_impl : public binary_permute
+{
+public:
+ CONSTEXPR svuzpq_impl (unsigned int base)
+ : binary_permute (base ? UNSPEC_UZPQ2 : UNSPEC_UZPQ1), m_base (base) {}
+
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ machine_mode mode = f.vector_mode (0);
+ unsigned int subelts = 128U / GET_MODE_UNIT_BITSIZE (mode);
+ poly_uint64 nelts = GET_MODE_NUNITS (mode);
+ vec_perm_builder builder (nelts, subelts, 3);
+ for (unsigned int i = 0; i < 3; ++i)
+ {
+ for (unsigned int j = 0; j < subelts / 2; ++j)
+ builder.quick_push (m_base + j * 2 + i * subelts);
+ for (unsigned int j = 0; j < subelts / 2; ++j)
+ builder.quick_push (m_base + j * 2 + i * subelts + nelts);
+ }
+ return fold_permute (f, builder);
+ }
+
+ /* 0 for svuzpq1, 1 for svuzpq2. */
+ unsigned int m_base;
+};
+
/* Implements both svwhilerw and svwhilewr; the unspec parameter decides
between them. */
class svwhilerw_svwhilewr_impl : public full_width_access
@@ -580,6 +830,34 @@ public:
int m_unspec;
};
+/* Implements svzipq1 and svzipq2. */
+class svzipq_impl : public binary_permute
+{
+public:
+ CONSTEXPR svzipq_impl (unsigned int base)
+ : binary_permute (base ? UNSPEC_ZIPQ2 : UNSPEC_ZIPQ1), m_base (base) {}
+
+ gimple *
+ fold (gimple_folder &f) const override
+ {
+ machine_mode mode = f.vector_mode (0);
+ unsigned int pairs = 64U / GET_MODE_UNIT_BITSIZE (mode);
+ poly_uint64 nelts = GET_MODE_NUNITS (mode);
+ auto base = m_base * pairs;
+ vec_perm_builder builder (nelts, pairs * 2, 3);
+ for (unsigned int i = 0; i < 3; ++i)
+ for (unsigned int j = 0; j < pairs; ++j)
+ {
+ builder.quick_push (base + j + i * pairs * 2);
+ builder.quick_push (base + j + i * pairs * 2 + nelts);
+ }
+ return fold_permute (f, builder);
+ }
+
+ /* 0 for svzipq1, 1 for svzipq2. */
+ unsigned int m_base;
+};
+
} /* end anonymous namespace */
namespace aarch64_sve {
@@ -601,6 +879,7 @@ FUNCTION (svaddlbt, unspec_based_function, (UNSPEC_SADDLBT, -1, -1))
FUNCTION (svaddlt, unspec_based_function, (UNSPEC_SADDLT, UNSPEC_UADDLT, -1))
FUNCTION (svaddp, unspec_based_pred_function, (UNSPEC_ADDP, UNSPEC_ADDP,
UNSPEC_FADDP))
+FUNCTION (svaddqv, reduction, (UNSPEC_ADDQV, UNSPEC_ADDQV, UNSPEC_FADDQV))
FUNCTION (svaddwb, unspec_based_function, (UNSPEC_SADDWB, UNSPEC_UADDWB, -1))
FUNCTION (svaddwt, unspec_based_function, (UNSPEC_SADDWT, UNSPEC_UADDWT, -1))
FUNCTION (svaesd, fixed_insn_function, (CODE_FOR_aarch64_sve2_aesd))
@@ -611,6 +890,7 @@ FUNCTION (svamax, cond_or_uncond_unspec_function,
(UNSPEC_COND_FAMAX, UNSPEC_FAMAX))
FUNCTION (svamin, cond_or_uncond_unspec_function,
(UNSPEC_COND_FAMIN, UNSPEC_FAMIN))
+FUNCTION (svandqv, reduction, (UNSPEC_ANDQV, UNSPEC_ANDQV, -1))
FUNCTION (svbcax, CODE_FOR_MODE0 (aarch64_sve2_bcax),)
FUNCTION (svbdep, unspec_based_function, (UNSPEC_BDEP, UNSPEC_BDEP, -1))
FUNCTION (svbext, unspec_based_function, (UNSPEC_BEXT, UNSPEC_BEXT, -1))
@@ -631,15 +911,24 @@ FUNCTION (svcvtlt, unspec_based_function, (-1, -1, UNSPEC_COND_FCVTLT))
FUNCTION (svcvtn, svcvtn_impl,)
FUNCTION (svcvtx, unspec_based_function, (-1, -1, UNSPEC_COND_FCVTX))
FUNCTION (svcvtxnt, CODE_FOR_MODE1 (aarch64_sve2_cvtxnt),)
+FUNCTION (svdup_laneq, svdup_laneq_impl,)
FUNCTION (sveor3, CODE_FOR_MODE0 (aarch64_sve2_eor3),)
FUNCTION (sveorbt, unspec_based_function, (UNSPEC_EORBT, UNSPEC_EORBT, -1))
+FUNCTION (sveorqv, reduction, (UNSPEC_EORQV, UNSPEC_EORQV, -1))
FUNCTION (sveortb, unspec_based_function, (UNSPEC_EORTB, UNSPEC_EORTB, -1))
+FUNCTION (svextq, svextq_impl,)
FUNCTION (svhadd, unspec_based_function, (UNSPEC_SHADD, UNSPEC_UHADD, -1))
FUNCTION (svhsub, unspec_based_function, (UNSPEC_SHSUB, UNSPEC_UHSUB, -1))
FUNCTION (svhistcnt, CODE_FOR_MODE0 (aarch64_sve2_histcnt),)
FUNCTION (svhistseg, CODE_FOR_MODE0 (aarch64_sve2_histseg),)
FUNCTION (svhsubr, unspec_based_function_rotated, (UNSPEC_SHSUB,
UNSPEC_UHSUB, -1))
+FUNCTION (svld1q_gather, svld1q_gather_impl,)
+FUNCTION (svld1udq, svld1uxq_impl, (VNx1DImode))
+FUNCTION (svld1uwq, svld1uxq_impl, (VNx1SImode))
+FUNCTION (svld2q, svld234q_impl, (2, VNx2TImode))
+FUNCTION (svld3q, svld234q_impl, (3, VNx3TImode))
+FUNCTION (svld4q, svld234q_impl, (4, VNx4TImode))
FUNCTION (svldnt1_gather, svldnt1_gather_impl,)
FUNCTION (svldnt1sb_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_s8))
FUNCTION (svldnt1sh_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_s16))
@@ -650,11 +939,15 @@ FUNCTION (svldnt1uw_gather, svldnt1_gather_extend_impl, (TYPE_SUFFIX_u32))
FUNCTION (svlogb, unspec_based_function, (-1, -1, UNSPEC_COND_FLOGB))
FUNCTION (svmatch, svmatch_svnmatch_impl, (UNSPEC_MATCH))
FUNCTION (svmaxnmp, unspec_based_pred_function, (-1, -1, UNSPEC_FMAXNMP))
+FUNCTION (svmaxnmqv, reduction, (-1, -1, UNSPEC_FMAXNMQV))
FUNCTION (svmaxp, unspec_based_pred_function, (UNSPEC_SMAXP, UNSPEC_UMAXP,
UNSPEC_FMAXP))
+FUNCTION (svmaxqv, reduction, (UNSPEC_SMAXQV, UNSPEC_UMAXQV, UNSPEC_FMAXQV))
FUNCTION (svminnmp, unspec_based_pred_function, (-1, -1, UNSPEC_FMINNMP))
+FUNCTION (svminnmqv, reduction, (-1, -1, UNSPEC_FMINNMQV))
FUNCTION (svminp, unspec_based_pred_function, (UNSPEC_SMINP, UNSPEC_UMINP,
UNSPEC_FMINP))
+FUNCTION (svminqv, reduction, (UNSPEC_SMINQV, UNSPEC_UMINQV, UNSPEC_FMINQV))
FUNCTION (svmlalb, unspec_based_mla_function, (UNSPEC_SMULLB,
UNSPEC_UMULLB, UNSPEC_FMLALB))
FUNCTION (svmlalb_lane, unspec_based_mla_lane_function, (UNSPEC_SMULLB,
@@ -685,7 +978,10 @@ FUNCTION (svmullt_lane, unspec_based_lane_function, (UNSPEC_SMULLT,
UNSPEC_UMULLT, -1))
FUNCTION (svnbsl, CODE_FOR_MODE0 (aarch64_sve2_nbsl),)
FUNCTION (svnmatch, svmatch_svnmatch_impl, (UNSPEC_NMATCH))
+FUNCTION (svorqv, reduction, (UNSPEC_ORQV, UNSPEC_ORQV, -1))
FUNCTION (svpext_lane, svpext_lane_impl,)
+FUNCTION (svpmov, svpmov_impl,)
+FUNCTION (svpmov_lane, svpmov_lane_impl,)
FUNCTION (svpmul, CODE_FOR_MODE0 (aarch64_sve2_pmul),)
FUNCTION (svpmullb, unspec_based_function, (-1, UNSPEC_PMULLB, -1))
FUNCTION (svpmullb_pair, unspec_based_function, (-1, UNSPEC_PMULLB_PAIR, -1))
@@ -787,6 +1083,12 @@ FUNCTION (svsm4ekey, fixed_insn_function, (CODE_FOR_aarch64_sve2_sm4ekey))
FUNCTION (svsqadd, svsqadd_impl,)
FUNCTION (svsra, svsra_impl,)
FUNCTION (svsri, unspec_based_function, (UNSPEC_SRI, UNSPEC_SRI, -1))
+FUNCTION (svst1dq, svst1xq_impl, (VNx1DImode))
+FUNCTION (svst1q_scatter, svst1q_scatter_impl,)
+FUNCTION (svst1wq, svst1xq_impl, (VNx1SImode))
+FUNCTION (svst2q, svst234q_impl, (2, VNx2TImode))
+FUNCTION (svst3q, svst234q_impl, (3, VNx3TImode))
+FUNCTION (svst4q, svst234q_impl, (4, VNx4TImode))
FUNCTION (svstnt1_scatter, svstnt1_scatter_impl,)
FUNCTION (svstnt1b_scatter, svstnt1_scatter_truncate_impl, (QImode))
FUNCTION (svstnt1h_scatter, svstnt1_scatter_truncate_impl, (HImode))
@@ -800,11 +1102,20 @@ FUNCTION (svsubltb, unspec_based_function, (UNSPEC_SSUBLTB, -1, -1))
FUNCTION (svsubwb, unspec_based_function, (UNSPEC_SSUBWB, UNSPEC_USUBWB, -1))
FUNCTION (svsubwt, unspec_based_function, (UNSPEC_SSUBWT, UNSPEC_USUBWT, -1))
FUNCTION (svtbl2, svtbl2_impl,)
-FUNCTION (svtbx, CODE_FOR_MODE0 (aarch64_sve2_tbx),)
+FUNCTION (svtblq, quiet<unspec_based_uncond_function>, (UNSPEC_TBLQ,
+ UNSPEC_TBLQ,
+ UNSPEC_TBLQ))
+FUNCTION (svtbx, quiet<unspec_based_uncond_function>, (UNSPEC_TBX, UNSPEC_TBX,
+ UNSPEC_TBX))
+FUNCTION (svtbxq, quiet<unspec_based_uncond_function>, (UNSPEC_TBXQ,
+ UNSPEC_TBXQ,
+ UNSPEC_TBXQ))
FUNCTION (svunpk, svunpk_impl,)
FUNCTION (svuqadd, svuqadd_impl,)
FUNCTION (svuzp, multireg_permute, (UNSPEC_UZP))
FUNCTION (svuzpq, multireg_permute, (UNSPEC_UZPQ))
+FUNCTION (svuzpq1, svuzpq_impl, (0))
+FUNCTION (svuzpq2, svuzpq_impl, (1))
FUNCTION (svwhilege, while_comparison, (UNSPEC_WHILEGE, UNSPEC_WHILEHS))
FUNCTION (svwhilegt, while_comparison, (UNSPEC_WHILEGT, UNSPEC_WHILEHI))
FUNCTION (svwhilerw, svwhilerw_svwhilewr_impl, (UNSPEC_WHILERW))
@@ -812,5 +1123,7 @@ FUNCTION (svwhilewr, svwhilerw_svwhilewr_impl, (UNSPEC_WHILEWR))
FUNCTION (svxar, svxar_impl,)
FUNCTION (svzip, multireg_permute, (UNSPEC_ZIP))
FUNCTION (svzipq, multireg_permute, (UNSPEC_ZIPQ))
+FUNCTION (svzipq1, svzipq_impl, (0))
+FUNCTION (svzipq2, svzipq_impl, (1))
} /* end namespace aarch64_sve */
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sve2.def b/gcc/config/aarch64/aarch64-sve-builtins-sve2.def
index 9e8aad9..c641ed5 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-sve2.def
+++ b/gcc/config/aarch64/aarch64-sve-builtins-sve2.def
@@ -220,6 +220,35 @@ DEF_SVE_FUNCTION (svsm4e, binary, s_unsigned, none)
DEF_SVE_FUNCTION (svsm4ekey, binary, s_unsigned, none)
#undef REQUIRED_EXTENSIONS
+#define REQUIRED_EXTENSIONS nonstreaming_sve (AARCH64_FL_SVE2p1)
+DEF_SVE_FUNCTION (svaddqv, reduction_neonq, all_arith, implicit)
+DEF_SVE_FUNCTION (svandqv, reduction_neonq, all_integer, implicit)
+DEF_SVE_FUNCTION (svdup_laneq, unary_lane, all_data, none)
+DEF_SVE_FUNCTION (sveorqv, reduction_neonq, all_integer, implicit)
+DEF_SVE_FUNCTION (svextq, extq, all_data, none)
+DEF_SVE_FUNCTION (svld2q, load, all_data, implicit)
+DEF_SVE_FUNCTION (svld3q, load, all_data, implicit)
+DEF_SVE_FUNCTION (svld4q, load, all_data, implicit)
+DEF_SVE_FUNCTION (svmaxnmqv, reduction_neonq, all_float, implicit)
+DEF_SVE_FUNCTION (svmaxqv, reduction_neonq, all_arith, implicit)
+DEF_SVE_FUNCTION (svminnmqv, reduction_neonq, all_float, implicit)
+DEF_SVE_FUNCTION (svminqv, reduction_neonq, all_arith, implicit)
+DEF_SVE_FUNCTION (svpmov, pmov_from_vector, all_integer, none)
+DEF_SVE_FUNCTION (svpmov, inherent, all_integer, z)
+DEF_SVE_FUNCTION (svpmov_lane, pmov_from_vector_lane, all_integer, none)
+DEF_SVE_FUNCTION (svpmov_lane, pmov_to_vector_lane, hsd_integer, m)
+DEF_SVE_FUNCTION (svorqv, reduction_neonq, all_integer, implicit)
+DEF_SVE_FUNCTION (svst2q, store, all_data, implicit)
+DEF_SVE_FUNCTION (svst3q, store, all_data, implicit)
+DEF_SVE_FUNCTION (svst4q, store, all_data, implicit)
+DEF_SVE_FUNCTION (svtblq, binary_uint, all_data, none)
+DEF_SVE_FUNCTION (svtbxq, ternary_uint, all_data, none)
+DEF_SVE_FUNCTION (svuzpq1, binary, all_data, none)
+DEF_SVE_FUNCTION (svuzpq2, binary, all_data, none)
+DEF_SVE_FUNCTION (svzipq1, binary, all_data, none)
+DEF_SVE_FUNCTION (svzipq2, binary, all_data, none)
+#undef REQUIRED_EXTENSIONS
+
#define REQUIRED_EXTENSIONS sve_and_sme (AARCH64_FL_SVE2p1, 0)
DEF_SVE_FUNCTION (svclamp, clamp, all_integer, none)
DEF_SVE_FUNCTION (svpsel_lane, select_pred, all_pred_count, none)
@@ -254,6 +283,19 @@ DEF_SVE_FUNCTION_GS (svwhilelt, compare_scalar, while_x, x2, none)
DEF_SVE_FUNCTION (svwhilelt, compare_scalar_count, while_x_c, none)
#undef REQUIRED_EXTENSIONS
+#define REQUIRED_EXTENSIONS nonstreaming_sve (AARCH64_FL_SVE2p1)
+DEF_SVE_FUNCTION (svld1q_gather, load_gather64_sv_offset, all_data, implicit)
+DEF_SVE_FUNCTION (svld1q_gather, load_gather64_sv_index, hsd_data, implicit)
+DEF_SVE_FUNCTION (svld1q_gather, load_gather64_vs_offset, all_data, implicit)
+DEF_SVE_FUNCTION (svld1q_gather, load_gather64_vs_index, hsd_data, implicit)
+DEF_SVE_FUNCTION (svld1udq, load, d_data, implicit)
+DEF_SVE_FUNCTION (svld1uwq, load, s_data, implicit)
+DEF_SVE_FUNCTION (svst1dq, store, d_data, implicit)
+DEF_SVE_FUNCTION (svst1q_scatter, store_scatter64_offset, all_data, implicit)
+DEF_SVE_FUNCTION (svst1q_scatter, store_scatter64_index, hsd_data, implicit)
+DEF_SVE_FUNCTION (svst1wq, store, s_data, implicit)
+#undef REQUIRED_EXTENSIONS
+
#define REQUIRED_EXTENSIONS streaming_only (AARCH64_FL_SME2)
DEF_SVE_FUNCTION_GS (svadd, binary_single, all_integer, x24, none)
DEF_SVE_FUNCTION_GS (svclamp, clamp, all_arith, x24, none)
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-sve2.h b/gcc/config/aarch64/aarch64-sve-builtins-sve2.h
index d581902..bb610cb 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-sve2.h
+++ b/gcc/config/aarch64/aarch64-sve-builtins-sve2.h
@@ -38,12 +38,14 @@ namespace aarch64_sve
extern const function_base *const svaddlbt;
extern const function_base *const svaddlt;
extern const function_base *const svaddp;
+ extern const function_base *const svaddqv;
extern const function_base *const svaddwb;
extern const function_base *const svaddwt;
extern const function_base *const svaesd;
extern const function_base *const svaese;
extern const function_base *const svaesimc;
extern const function_base *const svaesmc;
+ extern const function_base *const svandqv;
extern const function_base *const svbcax;
extern const function_base *const svbdep;
extern const function_base *const svbext;
@@ -63,14 +65,23 @@ namespace aarch64_sve
extern const function_base *const svcvtn;
extern const function_base *const svcvtx;
extern const function_base *const svcvtxnt;
+ extern const function_base *const svdup_laneq;
extern const function_base *const sveor3;
extern const function_base *const sveorbt;
+ extern const function_base *const sveorqv;
extern const function_base *const sveortb;
+ extern const function_base *const svextq;
extern const function_base *const svhadd;
extern const function_base *const svhistcnt;
extern const function_base *const svhistseg;
extern const function_base *const svhsub;
extern const function_base *const svhsubr;
+ extern const function_base *const svld1q_gather;
+ extern const function_base *const svld1udq;
+ extern const function_base *const svld1uwq;
+ extern const function_base *const svld2q;
+ extern const function_base *const svld3q;
+ extern const function_base *const svld4q;
extern const function_base *const svldnt1_gather;
extern const function_base *const svldnt1sb_gather;
extern const function_base *const svldnt1sh_gather;
@@ -81,9 +92,13 @@ namespace aarch64_sve
extern const function_base *const svlogb;
extern const function_base *const svmatch;
extern const function_base *const svmaxnmp;
+ extern const function_base *const svmaxnmqv;
extern const function_base *const svmaxp;
+ extern const function_base *const svmaxqv;
extern const function_base *const svminnmp;
+ extern const function_base *const svminnmqv;
extern const function_base *const svminp;
+ extern const function_base *const svminqv;
extern const function_base *const svmlalb;
extern const function_base *const svmlalb_lane;
extern const function_base *const svmlalt;
@@ -100,7 +115,10 @@ namespace aarch64_sve
extern const function_base *const svmullt_lane;
extern const function_base *const svnbsl;
extern const function_base *const svnmatch;
+ extern const function_base *const svorqv;
extern const function_base *const svpext_lane;
+ extern const function_base *const svpmov;
+ extern const function_base *const svpmov_lane;
extern const function_base *const svpmul;
extern const function_base *const svpmullb;
extern const function_base *const svpmullb_pair;
@@ -180,6 +198,12 @@ namespace aarch64_sve
extern const function_base *const svsqadd;
extern const function_base *const svsra;
extern const function_base *const svsri;
+ extern const function_base *const svst1dq;
+ extern const function_base *const svst1q_scatter;
+ extern const function_base *const svst1wq;
+ extern const function_base *const svst2q;
+ extern const function_base *const svst3q;
+ extern const function_base *const svst4q;
extern const function_base *const svstnt1_scatter;
extern const function_base *const svstnt1b_scatter;
extern const function_base *const svstnt1h_scatter;
@@ -193,11 +217,15 @@ namespace aarch64_sve
extern const function_base *const svsubwb;
extern const function_base *const svsubwt;
extern const function_base *const svtbl2;
+ extern const function_base *const svtblq;
extern const function_base *const svtbx;
+ extern const function_base *const svtbxq;
extern const function_base *const svunpk;
extern const function_base *const svuqadd;
extern const function_base *const svuzp;
extern const function_base *const svuzpq;
+ extern const function_base *const svuzpq1;
+ extern const function_base *const svuzpq2;
extern const function_base *const svwhilege;
extern const function_base *const svwhilegt;
extern const function_base *const svwhilerw;
@@ -205,6 +233,8 @@ namespace aarch64_sve
extern const function_base *const svxar;
extern const function_base *const svzip;
extern const function_base *const svzipq;
+ extern const function_base *const svzipq1;
+ extern const function_base *const svzipq2;
}
}
diff --git a/gcc/config/aarch64/aarch64-sve-builtins.cc b/gcc/config/aarch64/aarch64-sve-builtins.cc
index 259e7b7..be6abab 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins.cc
@@ -334,6 +334,11 @@ CONSTEXPR const group_suffix_info group_suffixes[] = {
#define TYPES_hsd_integer(S, D) \
TYPES_hsd_signed (S, D), S (u16), S (u32), S (u64)
+#define TYPES_hsd_data(S, D) \
+ TYPES_h_data (S, D), \
+ TYPES_s_data (S, D), \
+ TYPES_d_data (S, D)
+
/* _f32. */
#define TYPES_s_float(S, D) \
S (f32)
@@ -742,12 +747,14 @@ DEF_SVE_TYPES_ARRAY (hs_data);
DEF_SVE_TYPES_ARRAY (hd_unsigned);
DEF_SVE_TYPES_ARRAY (hsd_signed);
DEF_SVE_TYPES_ARRAY (hsd_integer);
+DEF_SVE_TYPES_ARRAY (hsd_data);
DEF_SVE_TYPES_ARRAY (s_float);
DEF_SVE_TYPES_ARRAY (s_float_hsd_integer);
DEF_SVE_TYPES_ARRAY (s_float_sd_integer);
DEF_SVE_TYPES_ARRAY (s_signed);
DEF_SVE_TYPES_ARRAY (s_unsigned);
DEF_SVE_TYPES_ARRAY (s_integer);
+DEF_SVE_TYPES_ARRAY (s_data);
DEF_SVE_TYPES_ARRAY (sd_signed);
DEF_SVE_TYPES_ARRAY (sd_unsigned);
DEF_SVE_TYPES_ARRAY (sd_integer);
@@ -2036,6 +2043,15 @@ function_resolver::infer_pointer_type (unsigned int argno,
actual, argno + 1, fndecl);
return NUM_TYPE_SUFFIXES;
}
+ if (displacement_units () == UNITS_elements && bits == 8)
+ {
+ error_at (location, "passing %qT to argument %d of %qE, which"
+ " expects the data to be 16 bits or wider",
+ actual, argno + 1, fndecl);
+ inform (location, "use the %<offset%> rather than %<index%> form"
+ " for 8-bit data");
+ return NUM_TYPE_SUFFIXES;
+ }
return type;
}
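
As a hedged illustration of when this fires (the intrinsic spellings are assumptions, not part of the patch): the index forms of the new 128-bit gathers only exist for 16-bit and wider data, so an overload resolved from a uint8_t pointer must use offsets:

    #include <arm_sve.h>

    /* Assumed ACLE spellings.  Calling svld1q_gather_index (pg, base, v)
       with a uint8_t pointer would now trigger the diagnostic above;
       the offset form is the supported spelling for 8-bit data.  */
    svuint8_t gather_bytes (svbool_t pg, const uint8_t *base, svuint64_t offsets)
    {
      return svld1q_gather_offset (pg, base, offsets);
    }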
@@ -2827,7 +2843,8 @@ function_resolver::resolve_sv_displacement (unsigned int argno,
}
}
- if (type_suffix_ids[0] == NUM_TYPE_SUFFIXES)
+ if (type_suffix_ids[0] == NUM_TYPE_SUFFIXES
+ && shape->vector_base_type (TYPE_SUFFIX_u32) == TYPE_SUFFIX_u32)
{
/* TYPE has been inferred rather than specified by the user,
so mention it in the error messages. */
diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
index f89036c..5f0ecf4 100644
--- a/gcc/config/aarch64/aarch64-sve.md
+++ b/gcc/config/aarch64/aarch64-sve.md
@@ -9018,6 +9018,7 @@
;; -------------------------------------------------------------------------
;; Includes:
;; - TBL
+;; - TBLQ (SVE2p1)
;; -------------------------------------------------------------------------
(define_expand "vec_perm<mode>"
@@ -9033,14 +9034,14 @@
}
)
-(define_insn "@aarch64_sve_tbl<mode>"
+(define_insn "@aarch64_sve_<perm_insn><mode>"
[(set (match_operand:SVE_FULL 0 "register_operand" "=w")
(unspec:SVE_FULL
[(match_operand:SVE_FULL 1 "register_operand" "w")
(match_operand:<V_INT_EQUIV> 2 "register_operand" "w")]
- UNSPEC_TBL))]
+ SVE_TBL))]
"TARGET_SVE"
- "tbl\t%0.<Vetype>, {%1.<Vetype>}, %2.<Vetype>"
+ "<perm_insn>\t%0.<Vetype>, {%1.<Vetype>}, %2.<Vetype>"
)
;; -------------------------------------------------------------------------
@@ -9129,9 +9130,13 @@
;; - TRN1
;; - TRN2
;; - UZP1
+;; - UZPQ1 (SVE2p1)
;; - UZP2
+;; - UZPQ2 (SVE2p1)
;; - ZIP1
+;; - ZIPQ1 (SVE2p1)
;; - ZIP2
+;; - ZIPQ2 (SVE2p1)
;; -------------------------------------------------------------------------
;; Like EXT, but start at the first active element.
@@ -9156,7 +9161,7 @@
(unspec:SVE_ALL
[(match_operand:SVE_ALL 1 "register_operand" "w")
(match_operand:SVE_ALL 2 "register_operand" "w")]
- PERMUTE))]
+ SVE_PERMUTE))]
"TARGET_SVE"
"<perm_insn>\t%0.<Vctype>, %1.<Vctype>, %2.<Vctype>"
)
diff --git a/gcc/config/aarch64/aarch64-sve2.md b/gcc/config/aarch64/aarch64-sve2.md
index 61bae64..9383c77 100644
--- a/gcc/config/aarch64/aarch64-sve2.md
+++ b/gcc/config/aarch64/aarch64-sve2.md
@@ -21,12 +21,22 @@
;; The file is organised into the following sections (search for the full
;; line):
;;
+;; == Moves
+;; ---- Predicate to vector moves
+;; ---- Vector to predicate moves
+;;
;; == Loads
+;; ---- 128-bit extending loads
+;; ---- 128-bit structure loads
;; ---- Multi-register loads predicated by a counter
+;; ---- 128-bit gather loads
;; ---- Non-temporal gather loads
;;
;; == Stores
+;; ---- 128-bit truncating stores
+;; ---- 128-bit structure stores
;; ---- Multi-register stores predicated by a counter
+;; ---- 128-bit scatter stores
;; ---- Non-temporal scatter stores
;;
;; == Predicate manipulation
@@ -99,8 +109,13 @@
;; ---- [INT,FP] Select based on predicates as counters
;; ---- [INT] While tests
;;
+;; == Reductions
+;; ---- [INT] Reduction to 128-bit vector
+;; ---- [FP] Reduction to 128-bit vector
+;;
;; == Permutation
;; ---- [INT,FP] Reversal
+;; ---- [INT,FP] HVLA permutes
;; ---- [INT,FP] General permutes
;; ---- [INT,FP] Multi-register permutes
;; ---- [INT] Optional bit-permute extensions
@@ -116,10 +131,121 @@
;; ---- Optional SM4 extensions
;; =========================================================================
+;; == Moves
+;; =========================================================================
+
+;; -------------------------------------------------------------------------
+;; ---- Predicate to vector moves
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - PMOV (to vector)
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_pmov_to_<mode>"
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w")
+ (unspec:SVE_FULL_I
+ [(match_operand:<VPRED> 1 "register_operand" "Upa")]
+ UNSPEC_PMOV_UNPACK))]
+ "TARGET_SVE2p1 && TARGET_NON_STREAMING"
+ "pmov\t%0, %1.<Vetype>"
+)
+
+(define_insn "@aarch64_pmov_lane_to_<mode>"
+ [(set (match_operand:SVE_FULL_I 0 "register_operand" "=w")
+ (unspec:SVE_FULL_I
+ [(match_operand:SVE_FULL_I 1 "register_operand" "0")
+ (match_operand:<VPRED> 2 "register_operand" "Upa")
+ (match_operand:DI 3 "immediate_operand")]
+ UNSPEC_PMOV_UNPACK_LANE))]
+ "TARGET_SVE2p1 && TARGET_NON_STREAMING"
+ "pmov\t%0[%3], %2.<Vetype>"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- Vector to predicate moves
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - PMOV (from vector)
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_pmov_from_<mode>"
+ [(set (match_operand:VNx16BI 0 "register_operand" "=Upa")
+ (unspec:VNx16BI
+ [(match_operand:SVE_FULL_I 1 "register_operand" "w")]
+ UNSPEC_PMOV_PACK))]
+ "TARGET_SVE2p1 && TARGET_NON_STREAMING"
+ "pmov\t%0.<Vetype>, %1"
+)
+
+(define_insn "@aarch64_pmov_lane_from_<mode>"
+ [(set (match_operand:VNx16BI 0 "register_operand" "=Upa")
+ (unspec:VNx16BI
+ [(match_operand:SVE_FULL_I 1 "register_operand" "w")
+ (match_operand:DI 2 "immediate_operand")]
+ UNSPEC_PMOV_PACK_LANE))]
+ "TARGET_SVE2p1 && TARGET_NON_STREAMING"
+ "pmov\t%0.<Vetype>, %1[%2]"
+)
+
+;; =========================================================================
;; == Loads
;; =========================================================================
;; -------------------------------------------------------------------------
+;; ---- 128-bit extending loads
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - LD1W (to .Q)
+;; - LD1D (to .Q)
+;; -------------------------------------------------------------------------
+
+;; There isn't really a natural way of representing these instructions
+;; with the modes that we normally use:
+;;
+;; (1) It doesn't really make sense to use VNx1TI (or similar) for the
+;; result, since there's nothing that can be done with such a mode
+;; other than to cast it to another mode. It also isn't how the
+;; ACLE represents it (for similar reasons).
+;;
+;; (2) Only the lowest bit in each group of 16 predicate bits is significant,
+;; but it doesn't really make sense to use VNx1BI to represent it,
+;; since there is no "PTRUE Pn.Q, ..." instruction.
+;;
+;; (3) We do however need to use VNx1DI and VNx1SI to represent the
+;; source memories, since none of the normal register modes would
+;; give the right extent and alignment information (with the alignment
+;; mattering only for -mstrict-align).
+(define_insn "@aarch64_sve_ld1_extendq<mode>"
+ [(set (match_operand:SVE_FULL_SD 0 "register_operand" "=w")
+ (unspec:SVE_FULL_SD
+ [(match_operand:<VPRED> 2 "register_operand" "Upl")
+ (match_operand:<LD1_EXTENDQ_MEM> 1 "memory_operand" "m")]
+ UNSPEC_LD1_EXTENDQ))]
+ "TARGET_SVE2p1 && TARGET_NON_STREAMING"
+ "ld1<Vesize>\t{%0.q}, %2/z, %1"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- 128-bit structure loads
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - LD2Q
+;; - LD3Q
+;; - LD4Q
+;; -------------------------------------------------------------------------
+
+;; Predicated LD[234]Q.
+(define_insn "@aarch64_sve_ldnq<mode>"
+ [(set (match_operand:SVE_STRUCT 0 "register_operand" "=w")
+ (unspec:SVE_STRUCT
+ [(match_operand:<VPRED> 2 "register_operand" "Upl")
+ (match_operand:<VNxTI> 1 "memory_operand" "m")]
+ UNSPEC_LDNQ))]
+ "TARGET_SVE2p1 && TARGET_NON_STREAMING"
+ "ld<vector_count>q\t{%S0.q - %<Vendreg>0.q}, %2/z, %1"
+)
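
A usage sketch with assumed ACLE spellings (not shown in this patch): unlike LD2, LD2Q de-interleaves whole 128-bit quadwords rather than individual elements, which is what makes it useful for hybrid VLA (vectors of 128-bit vectors):

    #include <arm_sve.h>

    /* Assumed ACLE spelling: the first vector of the result receives the
       even-numbered quadwords from memory, the second the odd ones.  */
    svfloat32x2_t load_quadword_pairs (svbool_t pg, const float *ptr)
    {
      return svld2q (pg, ptr);
    }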
+
+;; -------------------------------------------------------------------------
;; ---- Multi-register loads predicated by a counter
;; -------------------------------------------------------------------------
;; Includes:
@@ -196,6 +322,33 @@
)
;; -------------------------------------------------------------------------
+;; ---- 128-bit gather loads
+;; -------------------------------------------------------------------------
+;; Includes gather forms of:
+;; - LD1Q
+;; -------------------------------------------------------------------------
+
+;; Model this as operating on the largest valid element size, which is DI.
+;; This avoids having to define move patterns & more for VNx1TI, which would
+;; be difficult without a non-gather form of LD1Q.
+(define_insn "aarch64_gather_ld1q"
+ [(set (match_operand:VNx2DI 0 "register_operand")
+ (unspec:VNx2DI
+ [(match_operand:VNx2BI 1 "register_operand")
+ (match_operand:DI 2 "aarch64_reg_or_zero")
+ (match_operand:VNx2DI 3 "register_operand")
+ (mem:BLK (scratch))]
+ UNSPEC_LD1_GATHER))]
+ "TARGET_SVE2p1 && TARGET_NON_STREAMING"
+ {@ [cons: =0, 1, 2, 3]
+ [&w, Upl, Z, w] ld1q\t{%0.q}, %1/z, [%3.d]
+ [?w, Upl, Z, 0] ^
+ [&w, Upl, r, w] ld1q\t{%0.q}, %1/z, [%3.d, %2]
+ [?w, Upl, r, 0] ^
+ }
+)
+
+;; -------------------------------------------------------------------------
;; ---- Non-temporal gather loads
;; -------------------------------------------------------------------------
;; Includes gather forms of:
@@ -256,6 +409,48 @@
;; =========================================================================
;; -------------------------------------------------------------------------
+;; ---- 128-bit truncating stores
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - ST1W (from .Q)
+;; - ST1D (from .Q)
+;; -------------------------------------------------------------------------
+
+;; See the comment above the corresponding loads for a discussion about the
+;; choice of modes.
+(define_insn "@aarch64_sve_st1_truncq<mode>"
+ [(set (match_operand:<LD1_EXTENDQ_MEM> 0 "memory_operand" "+m")
+ (unspec:<LD1_EXTENDQ_MEM>
+ [(match_operand:<VPRED> 2 "register_operand" "Upl")
+ (match_operand:SVE_FULL_SD 1 "register_operand" "w")
+ (match_dup 0)]
+ UNSPEC_ST1_TRUNCQ))]
+ "TARGET_SVE2p1 && TARGET_NON_STREAMING"
+ "st1<Vesize>\t{%1.q}, %2, %0"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- 128-bit structure stores
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - ST2Q
+;; - ST3Q
+;; - ST4Q
+;; -------------------------------------------------------------------------
+
+;; Predicated ST[234]Q.
+(define_insn "@aarch64_sve_stnq<mode>"
+ [(set (match_operand:<VNxTI> 0 "memory_operand" "+m")
+ (unspec:<VNxTI>
+ [(match_operand:<VPRED> 2 "register_operand" "Upl")
+ (match_operand:SVE_STRUCT 1 "register_operand" "w")
+ (match_dup 0)]
+ UNSPEC_STNQ))]
+ "TARGET_SVE2p1 && TARGET_NON_STREAMING"
+ "st<vector_count>q\t{%S1.q - %<Vendreg>1.q}, %2, %0"
+)
+
+;; -------------------------------------------------------------------------
;; ---- Multi-register stores predicated by a counter
;; -------------------------------------------------------------------------
;; Includes:
@@ -312,6 +507,28 @@
)
;; -------------------------------------------------------------------------
+;; ---- 128-bit scatter stores
+;; -------------------------------------------------------------------------
+;; Includes scatter form of:
+;; - ST1Q
+;; -------------------------------------------------------------------------
+
+(define_insn "aarch64_scatter_st1q"
+ [(set (mem:BLK (scratch))
+ (unspec:BLK
+ [(match_operand:VNx2BI 0 "register_operand")
+ (match_operand:DI 1 "aarch64_reg_or_zero")
+ (match_operand:VNx2DI 2 "register_operand")
+ (match_operand:VNx2DI 3 "register_operand")]
+ UNSPEC_ST1Q_SCATTER))]
+ "TARGET_SVE2p1 && TARGET_NON_STREAMING"
+ {@ [ cons: 0 , 1 , 2 , 3 ]
+ [ Upl , Z , w , w ] st1q\t{%3.q}, %0, [%2.d]
+ [ Upl , r , w , w ] st1q\t{%3.q}, %0, [%2.d, %1]
+ }
+)
+
+;; -------------------------------------------------------------------------
;; ---- Non-temporal scatter stores
;; -------------------------------------------------------------------------
;; Includes scatter forms of:
@@ -3172,6 +3389,55 @@
)
;; =========================================================================
+;; == Reductions
+;; =========================================================================
+
+;; -------------------------------------------------------------------------
+;; ---- [INT] Reduction to 128-bit vector
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - ADDQV
+;; - ANDQV
+;; - EORQV
+;; - ORQV
+;; - SMAXQV
+;; - SMINQV
+;; - UMAXQV
+;; - UMINQV
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_pred_reduc_<optab>_<mode>"
+ [(set (match_operand:<V128> 0 "register_operand" "=w")
+ (unspec:<V128>
+ [(match_operand:<VPRED> 1 "register_operand" "Upl")
+ (match_operand:SVE_FULL_I 2 "register_operand" "w")]
+ SVE_INT_REDUCTION_128))]
+ "TARGET_SVE2p1 && TARGET_NON_STREAMING"
+ "<optab>\t%0.<Vtype>, %1, %2.<Vetype>"
+)
+
+;; -------------------------------------------------------------------------
+;; ---- [FP] Reduction to 128-bit vector
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - FADDQV
+;; - FMAXNMQV
+;; - FMAXQV
+;; - FMINNMQV
+;; - FMINQV
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_pred_reduc_<optab>_<mode>"
+ [(set (match_operand:<V128> 0 "register_operand" "=w")
+ (unspec:<V128>
+ [(match_operand:<VPRED> 1 "register_operand" "Upl")
+ (match_operand:SVE_FULL_F 2 "register_operand" "w")]
+ SVE_FP_REDUCTION_128))]
+ "TARGET_SVE2p1 && TARGET_NON_STREAMING"
+ "<optab>\t%0.<Vtype>, %1, %2.<Vetype>"
+)
+
+;; =========================================================================
;; == Permutation
;; =========================================================================
@@ -3214,11 +3480,51 @@
)
;; -------------------------------------------------------------------------
+;; ---- [INT,FP] HVLA permutes
+;; -------------------------------------------------------------------------
+;; Includes:
+;; - DUPQ
+;; - EXTQ
+;; -------------------------------------------------------------------------
+
+(define_insn "@aarch64_sve_dupq<mode>"
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w")
+ (unspec:SVE_FULL
+ [(match_operand:SVE_FULL 1 "register_operand" "w")
+ (match_operand:SI 2 "const_int_operand")]
+ UNSPEC_DUPQ))]
+ "TARGET_SVE2p1
+ && TARGET_NON_STREAMING
+ && IN_RANGE (INTVAL (operands[2]) * (<elem_bits> / 8), 0, 15)"
+ "dupq\t%0.<Vetype>, %1.<Vetype>[%2]"
+)
+
+(define_insn "@aarch64_sve_extq<mode>"
+ [(set (match_operand:SVE_FULL 0 "register_operand" "=w, ?&w")
+ (unspec:SVE_FULL
+ [(match_operand:SVE_FULL 1 "register_operand" "0, w")
+ (match_operand:SVE_FULL 2 "register_operand" "w, w")
+ (match_operand:SI 3 "const_int_operand")]
+ UNSPEC_EXTQ))]
+ "TARGET_SVE2p1
+ && TARGET_NON_STREAMING
+ && IN_RANGE (INTVAL (operands[3]) * (<elem_bits> / 8), 0, 15)"
+ {
+ operands[3] = GEN_INT (INTVAL (operands[3]) * (<elem_bits> / 8));
+ return (which_alternative == 0
+ ? "extq\\t%0.b, %0.b, %2.b, #%3"
+ : "movprfx\t%0, %1\;extq\\t%0.b, %0.b, %2.b, #%3");
+ }
+ [(set_attr "movprfx" "*,yes")]
+)
+
+;; -------------------------------------------------------------------------
;; ---- [INT,FP] General permutes
;; -------------------------------------------------------------------------
;; Includes:
;; - TBL (vector pair form)
;; - TBX
+;; - TBXQ (SVE2p1)
;; -------------------------------------------------------------------------
;; TBL on a pair of data vectors.
@@ -3232,16 +3538,16 @@
"tbl\t%0.<Vetype>, %1, %2.<Vetype>"
)
-;; TBX. These instructions do not take MOVPRFX.
-(define_insn "@aarch64_sve2_tbx<mode>"
+;; TBX(Q). These instructions do not take MOVPRFX.
+(define_insn "@aarch64_sve_<perm_insn><mode>"
[(set (match_operand:SVE_FULL 0 "register_operand" "=w")
(unspec:SVE_FULL
[(match_operand:SVE_FULL 1 "register_operand" "0")
(match_operand:SVE_FULL 2 "register_operand" "w")
(match_operand:<V_INT_EQUIV> 3 "register_operand" "w")]
- UNSPEC_TBX))]
+ SVE_TBX))]
"TARGET_SVE2"
- "tbx\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>"
+ "<perm_insn>\t%0.<Vetype>, %2.<Vetype>, %3.<Vetype>"
)
;; -------------------------------------------------------------------------
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index eaf1e11..e9cee0a 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -1692,6 +1692,32 @@ aarch64_classify_vector_mode (machine_mode mode, bool any_target_p = false)
}
}
+/* Like aarch64_classify_vector_mode, but also include modes that are used
+ for memory operands but not register operands. Such modes do not count
+ as real vector modes; they are just an internal construct to make things
+ easier to describe. */
+static unsigned int
+aarch64_classify_vector_memory_mode (machine_mode mode)
+{
+ switch (mode)
+ {
+ case VNx1SImode:
+ case VNx1DImode:
+ return TARGET_SVE ? VEC_SVE_DATA | VEC_PARTIAL : 0;
+
+ case VNx1TImode:
+ return TARGET_SVE ? VEC_SVE_DATA : 0;
+
+ case VNx2TImode:
+ case VNx3TImode:
+ case VNx4TImode:
+ return TARGET_SVE ? VEC_SVE_DATA | VEC_STRUCT : 0;
+
+ default:
+ return aarch64_classify_vector_mode (mode);
+ }
+}
+
/* Return true if MODE is any of the Advanced SIMD structure modes. */
bool
aarch64_advsimd_struct_mode_p (machine_mode mode)
@@ -2578,7 +2604,9 @@ aarch64_regmode_natural_size (machine_mode mode)
code for Advanced SIMD. */
if (!aarch64_sve_vg.is_constant ())
{
- unsigned int vec_flags = aarch64_classify_vector_mode (mode);
+ /* REGMODE_NATURAL_SIZE influences general subreg validity rules,
+ so we need to handle memory-only modes as well. */
+ unsigned int vec_flags = aarch64_classify_vector_memory_mode (mode);
if (vec_flags & VEC_SVE_PRED)
return BYTES_PER_SVE_PRED;
if (vec_flags & VEC_SVE_DATA)
@@ -10484,7 +10512,8 @@ aarch64_classify_index (struct aarch64_address_info *info, rtx x,
&& contains_reg_of_mode[GENERAL_REGS][GET_MODE (SUBREG_REG (index))])
index = SUBREG_REG (index);
- if (aarch64_sve_data_mode_p (mode) || mode == VNx1TImode)
+ auto vec_flags = aarch64_classify_vector_memory_mode (mode);
+ if (vec_flags & VEC_SVE_DATA)
{
if (type != ADDRESS_REG_REG
|| (1 << shift) != GET_MODE_UNIT_SIZE (mode))
@@ -10555,7 +10584,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
Partial vectors like VNx8QImode allow the same indexed addressing
mode and MUL VL addressing mode as full vectors like VNx16QImode;
in both cases, MUL VL counts multiples of GET_MODE_SIZE. */
- unsigned int vec_flags = aarch64_classify_vector_mode (mode);
+ unsigned int vec_flags = aarch64_classify_vector_memory_mode (mode);
vec_flags &= ~VEC_PARTIAL;
/* On BE, we use load/store pair for all large int mode load/stores.
@@ -10591,8 +10620,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
&& ((vec_flags == 0
&& known_lt (GET_MODE_SIZE (mode), 16))
|| vec_flags == VEC_ADVSIMD
- || vec_flags & VEC_SVE_DATA
- || mode == VNx1TImode));
+ || vec_flags & VEC_SVE_DATA));
/* For SVE, only accept [Rn], [Rn, #offset, MUL VL] and [Rn, Rm, LSL #shift].
The latter is not valid for SVE predicates, and that's rejected through
@@ -10711,7 +10739,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
/* Make "m" use the LD1 offset range for SVE data modes, so
that pre-RTL optimizers like ivopts will work to that
instead of the wider LDR/STR range. */
- if (vec_flags == VEC_SVE_DATA || mode == VNx1TImode)
+ if (vec_flags == VEC_SVE_DATA)
return (type == ADDR_QUERY_M
? offset_4bit_signed_scaled_p (mode, offset)
: offset_9bit_signed_scaled_p (mode, offset));
@@ -12029,7 +12057,7 @@ sizetochar (int size)
case 64: return 'd';
case 32: return 's';
case 16: return 'h';
- case 8 : return 'b';
+ case 8: return 'b';
default: gcc_unreachable ();
}
}
@@ -12611,7 +12639,7 @@ aarch64_print_address_internal (FILE *f, machine_mode mode, rtx x,
return true;
}
- vec_flags = aarch64_classify_vector_mode (mode);
+ vec_flags = aarch64_classify_vector_memory_mode (mode);
if ((vec_flags & VEC_ANY_SVE) && !load_store_pair_p)
{
HOST_WIDE_INT vnum
@@ -26239,6 +26267,107 @@ aarch64_evpc_dup (struct expand_vec_perm_d *d)
return true;
}
+/* Recognize things that can be done using the SVE2p1 Hybrid-VLA
+ permutations, which apply Advanced-SIMD-style permutations to each
+ individual 128-bit block. */
+
+static bool
+aarch64_evpc_hvla (struct expand_vec_perm_d *d)
+{
+ machine_mode vmode = d->vmode;
+ if (!TARGET_SVE2p1
+ || !TARGET_NON_STREAMING
+ || BYTES_BIG_ENDIAN
+ || d->vec_flags != VEC_SVE_DATA
+ || GET_MODE_UNIT_BITSIZE (vmode) > 64)
+ return false;
+
+ /* Set SUBELTS to the number of elements in an Advanced SIMD vector
+ and make sure that adding SUBELTS to each block of SUBELTS indices
+ gives the next block of SUBELTS indices. That is, it must be possible
+ to interpret the index vector as SUBELTS interleaved linear series in
+ which each series has step SUBELTS. */
+ unsigned int subelts = 128U / GET_MODE_UNIT_BITSIZE (vmode);
+ unsigned int pairs = subelts / 2;
+ for (unsigned int i = 0; i < subelts; ++i)
+ if (!d->perm.series_p (i, subelts, d->perm[i], subelts))
+ return false;
+
+ /* Used once we have verified that we can use UNSPEC to do the operation. */
+ auto use_binary = [&](int unspec) -> bool
+ {
+ if (!d->testing_p)
+ {
+ rtvec vec = gen_rtvec (2, d->op0, d->op1);
+ emit_set_insn (d->target, gen_rtx_UNSPEC (vmode, vec, unspec));
+ }
+ return true;
+ };
+
+ /* Now check whether the first SUBELTS elements match a supported
+ Advanced-SIMD-style operation. */
+ poly_int64 first = d->perm[0];
+ poly_int64 nelt = d->perm.length ();
+ auto try_zip = [&]() -> bool
+ {
+ if (maybe_ne (first, 0) && maybe_ne (first, pairs))
+ return false;
+ for (unsigned int i = 0; i < pairs; ++i)
+ if (maybe_ne (d->perm[i * 2], first + i)
+ || maybe_ne (d->perm[i * 2 + 1], first + nelt + i))
+ return false;
+ return use_binary (maybe_ne (first, 0) ? UNSPEC_ZIPQ2 : UNSPEC_ZIPQ1);
+ };
+ auto try_uzp = [&]() -> bool
+ {
+ if (maybe_ne (first, 0) && maybe_ne (first, 1))
+ return false;
+ for (unsigned int i = 0; i < pairs; ++i)
+ if (maybe_ne (d->perm[i], first + i * 2)
+ || maybe_ne (d->perm[i + pairs], first + nelt + i * 2))
+ return false;
+ return use_binary (maybe_ne (first, 0) ? UNSPEC_UZPQ2 : UNSPEC_UZPQ1);
+ };
+ auto try_extq = [&]() -> bool
+ {
+ HOST_WIDE_INT start;
+ if (!first.is_constant (&start) || !IN_RANGE (start, 0, subelts - 1))
+ return false;
+ for (unsigned int i = 0; i < subelts; ++i)
+ {
+ poly_int64 next = (start + i >= subelts
+ ? start + i - subelts + nelt
+ : start + i);
+ if (maybe_ne (d->perm[i], next))
+ return false;
+ }
+ if (!d->testing_p)
+ {
+ rtx op2 = gen_int_mode (start, SImode);
+ emit_insn (gen_aarch64_sve_extq (vmode, d->target,
+ d->op0, d->op1, op2));
+ }
+ return true;
+ };
+ auto try_dupq = [&]() -> bool
+ {
+ HOST_WIDE_INT start;
+ if (!first.is_constant (&start) || !IN_RANGE (start, 0, subelts - 1))
+ return false;
+ for (unsigned int i = 0; i < subelts; ++i)
+ if (maybe_ne (d->perm[i], start))
+ return false;
+ if (!d->testing_p)
+ {
+ rtx op1 = gen_int_mode (start, SImode);
+ emit_insn (gen_aarch64_sve_dupq (vmode, d->target, d->op0, op1));
+ }
+ return true;
+ };
+
+ return try_zip () || try_uzp () || try_extq () || try_dupq ();
+}
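
As an illustration of the kind of fixed-length shuffle this can now match (a sketch, not from the patch; whether ZIPQ1 is actually chosen depends on the target flags and cost model): with 256-bit vectors of floats, interleaving the low pair of each 128-bit block of two operands uses the selector {0, 8, 1, 9, 4, 12, 5, 13}, which satisfies the series check with step 4 and matches try_zip with first == 0:

    /* Hypothetical example; assumes something like
       -march=armv9-a+sve2p1 -msve-vector-bits=256.  */
    typedef float vf8 __attribute__ ((vector_size (32)));

    vf8 zipq1_like (vf8 a, vf8 b)
    {
      return __builtin_shufflevector (a, b, 0, 8, 1, 9, 4, 12, 5, 13);
    }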
+
static bool
aarch64_evpc_tbl (struct expand_vec_perm_d *d)
{
@@ -26515,6 +26644,8 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
return true;
else if (aarch64_evpc_ins (d))
return true;
+ else if (aarch64_evpc_hvla (d))
+ return true;
else if (aarch64_evpc_reencode (d))
return true;
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 73d6748..8e3b5731 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -734,7 +734,9 @@
UNSPEC_USHLL ; Used in aarch64-simd.md.
UNSPEC_ADDP ; Used in aarch64-simd.md.
UNSPEC_TBL ; Used in vector permute patterns.
+ UNSPEC_TBLQ ; Used in vector permute patterns.
UNSPEC_TBX ; Used in vector permute patterns.
+ UNSPEC_TBXQ ; Used in vector permute patterns.
UNSPEC_CONCAT ; Used in vector permute patterns.
;; The following permute unspecs are generated directly by
@@ -1071,14 +1073,43 @@
UNSPEC_FAMIN ; Used in aarch64-simd.md.
;; All used in aarch64-sve2.md
+ UNSPEC_ADDQV
+ UNSPEC_ANDQV
+ UNSPEC_DUPQ
+ UNSPEC_EORQV
+ UNSPEC_EXTQ
+ UNSPEC_FADDQV
+ UNSPEC_FMAXQV
+ UNSPEC_FMAXNMQV
+ UNSPEC_FMINQV
+ UNSPEC_FMINNMQV
UNSPEC_FCVTN
UNSPEC_FDOT
+ UNSPEC_LD1_EXTENDQ
+ UNSPEC_LD1Q_GATHER
+ UNSPEC_LDNQ
+ UNSPEC_ORQV
+ UNSPEC_PMOV_PACK
+ UNSPEC_PMOV_PACK_LANE
+ UNSPEC_PMOV_UNPACK
+ UNSPEC_PMOV_UNPACK_LANE
+ UNSPEC_SMAXQV
+ UNSPEC_SMINQV
UNSPEC_SQCVT
UNSPEC_SQCVTN
UNSPEC_SQCVTU
UNSPEC_SQCVTUN
+ UNSPEC_ST1_TRUNCQ
+ UNSPEC_ST1Q_SCATTER
+ UNSPEC_STNQ
+ UNSPEC_UMAXQV
+ UNSPEC_UMINQV
UNSPEC_UQCVT
UNSPEC_UQCVTN
+ UNSPEC_UZPQ1
+ UNSPEC_UZPQ2
+ UNSPEC_ZIPQ1
+ UNSPEC_ZIPQ2
;; All used in aarch64-sme.md
UNSPEC_SME_ADD
@@ -1326,7 +1357,11 @@
(V4x16QI "16b") (V4x8HI "8h")
(V4x4SI "4s") (V4x2DI "2d")
(V4x8HF "8h") (V4x4SF "4s")
- (V4x2DF "2d") (V4x8BF "8h")])
+ (V4x2DF "2d") (V4x8BF "8h")
+ (VNx16QI "16b") (VNx8HI "8h")
+ (VNx4SI "4s") (VNx2DI "2d")
+ (VNx8HF "8h") (VNx4SF "4s")
+ (VNx2DF "2d") (VNx8BF "8h")])
;; Map mode to type used in widening multiplies.
(define_mode_attr Vcondtype [(V4HI "4h") (V8HI "4h") (V2SI "2s") (V4SI "2s")])
@@ -1994,7 +2029,22 @@
(V4x4HF "V") (V4x8HF "V")
(V4x2SF "V") (V4x4SF "V")
(V4x1DF "V") (V4x2DF "V")
- (V4x4BF "V") (V4x8BF "V")])
+ (V4x4BF "V") (V4x8BF "V")
+
+ (VNx32QI "T") (VNx16HI "T")
+ (VNx8SI "T") (VNx4DI "T")
+ (VNx16BF "T") (VNx16HF "T")
+ (VNx8SF "T") (VNx4DF "T")
+
+ (VNx48QI "U") (VNx24HI "U")
+ (VNx12SI "U") (VNx6DI "U")
+ (VNx24BF "U") (VNx24HF "U")
+ (VNx12SF "U") (VNx6DF "U")
+
+ (VNx64QI "V") (VNx32HI "V")
+ (VNx16SI "V") (VNx8DI "V")
+ (VNx32BF "V") (VNx32HF "V")
+ (VNx16SF "V") (VNx8DF "V")])
;; This is both the number of Q-Registers needed to hold the corresponding
;; opaque large integer mode, and the number of elements touched by the
@@ -2338,6 +2388,21 @@
(VNx4SI "VNx8SI") (VNx4SF "VNx8SF")
(VNx2DI "VNx4DI") (VNx2DF "VNx4DF")])
+(define_mode_attr VNxTI [(VNx32QI "VNx2TI") (VNx16HI "VNx2TI")
+ (VNx8SI "VNx2TI") (VNx4DI "VNx2TI")
+ (VNx16BF "VNx2TI") (VNx16HF "VNx2TI")
+ (VNx8SF "VNx2TI") (VNx4DF "VNx2TI")
+
+ (VNx48QI "VNx3TI") (VNx24HI "VNx3TI")
+ (VNx12SI "VNx3TI") (VNx6DI "VNx3TI")
+ (VNx24BF "VNx3TI") (VNx24HF "VNx3TI")
+ (VNx12SF "VNx3TI") (VNx6DF "VNx3TI")
+
+ (VNx64QI "VNx4TI") (VNx32HI "VNx4TI")
+ (VNx16SI "VNx4TI") (VNx8DI "VNx4TI")
+ (VNx32BF "VNx4TI") (VNx32HF "VNx4TI")
+ (VNx16SF "VNx4TI") (VNx8DF "VNx4TI")])
+
;; The Advanced SIMD modes of popcount corresponding to scalar modes.
(define_mode_attr VEC_POP_MODE [(QI "V8QI") (HI "V4HI")
(SI "V2SI") (DI "V1DI")])
@@ -2448,6 +2513,9 @@
(VNx64QI "Uw4") (VNx32HI "Uw4")
(VNx32BF "Uw4") (VNx32HF "Uw4")])
+(define_mode_attr LD1_EXTENDQ_MEM [(VNx4SI "VNx1SI") (VNx4SF "VNx1SI")
+ (VNx2DI "VNx1DI") (VNx2DF "VNx1DI")])
+
;; -------------------------------------------------------------------
;; Code Iterators
;; -------------------------------------------------------------------
@@ -2973,6 +3041,21 @@
UNSPEC_TRN1 UNSPEC_TRN2
UNSPEC_UZP1 UNSPEC_UZP2])
+(define_int_iterator SVE_PERMUTE
+ [PERMUTE
+ (UNSPEC_UZPQ1 "TARGET_SVE2p1 && TARGET_NON_STREAMING")
+ (UNSPEC_UZPQ2 "TARGET_SVE2p1 && TARGET_NON_STREAMING")
+ (UNSPEC_ZIPQ1 "TARGET_SVE2p1 && TARGET_NON_STREAMING")
+ (UNSPEC_ZIPQ2 "TARGET_SVE2p1 && TARGET_NON_STREAMING")])
+
+(define_int_iterator SVE_TBL
+ [UNSPEC_TBL
+ (UNSPEC_TBLQ "TARGET_SVE2p1 && TARGET_NON_STREAMING")])
+
+(define_int_iterator SVE_TBX
+ [UNSPEC_TBX
+ (UNSPEC_TBXQ "TARGET_SVE2p1 && TARGET_NON_STREAMING")])
+
(define_int_iterator PERMUTEQ [UNSPEC_ZIP1Q UNSPEC_ZIP2Q
UNSPEC_TRN1Q UNSPEC_TRN2Q
UNSPEC_UZP1Q UNSPEC_UZP2Q])
@@ -3072,12 +3155,27 @@
UNSPEC_UMINV
UNSPEC_XORV])
+(define_int_iterator SVE_INT_REDUCTION_128 [UNSPEC_ADDQV
+ UNSPEC_ANDQV
+ UNSPEC_EORQV
+ UNSPEC_ORQV
+ UNSPEC_SMAXQV
+ UNSPEC_SMINQV
+ UNSPEC_UMAXQV
+ UNSPEC_UMINQV])
+
(define_int_iterator SVE_FP_REDUCTION [UNSPEC_FADDV
UNSPEC_FMAXV
UNSPEC_FMAXNMV
UNSPEC_FMINV
UNSPEC_FMINNMV])
+(define_int_iterator SVE_FP_REDUCTION_128 [UNSPEC_FADDQV
+ UNSPEC_FMAXQV
+ UNSPEC_FMAXNMQV
+ UNSPEC_FMINQV
+ UNSPEC_FMINNMQV])
+
(define_int_iterator SVE_COND_FP_UNARY [UNSPEC_COND_FABS
UNSPEC_COND_FNEG
UNSPEC_COND_FRECPX
@@ -3629,6 +3727,8 @@
(UNSPEC_UMINV "umin")
(UNSPEC_SMAXV "smax")
(UNSPEC_SMINV "smin")
+ (UNSPEC_ADDQV "addqv")
+ (UNSPEC_ANDQV "andqv")
(UNSPEC_CADD90 "cadd90")
(UNSPEC_CADD270 "cadd270")
(UNSPEC_CDOT "cdot")
@@ -3639,9 +3739,15 @@
(UNSPEC_CMLA90 "cmla90")
(UNSPEC_CMLA180 "cmla180")
(UNSPEC_CMLA270 "cmla270")
+ (UNSPEC_EORQV "eorqv")
(UNSPEC_FADDV "plus")
+ (UNSPEC_FADDQV "faddqv")
+ (UNSPEC_FMAXQV "fmaxqv")
+ (UNSPEC_FMAXNMQV "fmaxnmqv")
(UNSPEC_FMAXNMV "smax")
(UNSPEC_FMAXV "smax_nan")
+ (UNSPEC_FMINQV "fminqv")
+ (UNSPEC_FMINNMQV "fminnmqv")
(UNSPEC_FMINNMV "smin")
(UNSPEC_FMINV "smin_nan")
(UNSPEC_SMUL_HIGHPART "smulh")
@@ -3657,11 +3763,16 @@
(UNSPEC_FTSSEL "ftssel")
(UNSPEC_LD1_COUNT "ld1")
(UNSPEC_LDNT1_COUNT "ldnt1")
+ (UNSPEC_ORQV "orqv")
(UNSPEC_PMULLB "pmullb")
(UNSPEC_PMULLB_PAIR "pmullb_pair")
(UNSPEC_PMULLT "pmullt")
(UNSPEC_PMULLT_PAIR "pmullt_pair")
(UNSPEC_SMATMUL "smatmul")
+ (UNSPEC_SMAXQV "smaxqv")
+ (UNSPEC_SMINQV "sminqv")
+ (UNSPEC_UMAXQV "umaxqv")
+ (UNSPEC_UMINQV "uminqv")
(UNSPEC_UZP "uzp")
(UNSPEC_UZPQ "uzpq")
(UNSPEC_ZIP "zip")
@@ -3955,12 +4066,16 @@
(define_int_attr perm_insn [(UNSPEC_ZIP1 "zip1") (UNSPEC_ZIP2 "zip2")
(UNSPEC_ZIP1Q "zip1") (UNSPEC_ZIP2Q "zip2")
+ (UNSPEC_ZIPQ1 "zipq1") (UNSPEC_ZIPQ2 "zipq2")
(UNSPEC_TRN1 "trn1") (UNSPEC_TRN2 "trn2")
(UNSPEC_TRN1Q "trn1") (UNSPEC_TRN2Q "trn2")
(UNSPEC_UZP1 "uzp1") (UNSPEC_UZP2 "uzp2")
(UNSPEC_UZP1Q "uzp1") (UNSPEC_UZP2Q "uzp2")
+ (UNSPEC_UZPQ1 "uzpq1") (UNSPEC_UZPQ2 "uzpq2")
(UNSPEC_UZP "uzp") (UNSPEC_UZPQ "uzp")
- (UNSPEC_ZIP "zip") (UNSPEC_ZIPQ "zip")])
+ (UNSPEC_ZIP "zip") (UNSPEC_ZIPQ "zip")
+ (UNSPEC_TBL "tbl") (UNSPEC_TBLQ "tblq")
+ (UNSPEC_TBX "tbx") (UNSPEC_TBXQ "tbxq")])
; op code for REV instructions (size within which elements are reversed).
(define_int_attr rev_op [(UNSPEC_REV64 "64") (UNSPEC_REV32 "32")
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/extq_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/extq_1.c
new file mode 100644
index 0000000..82357b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/extq_1.c
@@ -0,0 +1,77 @@
+/* { dg-do compile } */
+
+#include <arm_sve.h>
+
+#pragma GCC target "+sve2p1"
+
+void
+f1 (svbool_t pg, svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
+ svfloat16_t f16, svint32_t s32, svuint32_t u32, svfloat32_t f32,
+ svint64_t s64, svuint64_t u64, svfloat64_t f64, int i)
+{
+ svextq (pg, pg, 0); /* { dg-error {'svextq' has no form that takes 'svbool_t' arguments} } */
+ svextq (s8, s8, i); /* { dg-error {argument 3 of 'svextq' must be an integer constant expression} } */
+
+ svextq (s8, s8, -1); /* { dg-error {passing -1 to argument 3 of 'svextq', which expects a value in the range \[0, 15\]} } */
+ svextq (s8, s8, 0);
+ svextq (s8, s8, 15);
+ svextq (s8, s8, 16); /* { dg-error {passing 16 to argument 3 of 'svextq', which expects a value in the range \[0, 15\]} } */
+
+ svextq (u8, u8, -1); /* { dg-error {passing -1 to argument 3 of 'svextq', which expects a value in the range \[0, 15\]} } */
+ svextq (u8, u8, 0);
+ svextq (u8, u8, 15);
+ svextq (u8, u8, 16); /* { dg-error {passing 16 to argument 3 of 'svextq', which expects a value in the range \[0, 15\]} } */
+
+ svextq (s16, s16, -1); /* { dg-error {passing -1 to argument 3 of 'svextq', which expects a value in the range \[0, 7\]} } */
+ svextq (s16, s16, 0);
+ svextq (s16, s16, 7);
+ svextq (s16, s16, 8); /* { dg-error {passing 8 to argument 3 of 'svextq', which expects a value in the range \[0, 7\]} } */
+
+ svextq (u16, u16, -1); /* { dg-error {passing -1 to argument 3 of 'svextq', which expects a value in the range \[0, 7\]} } */
+ svextq (u16, u16, 0);
+ svextq (u16, u16, 7);
+ svextq (u16, u16, 8); /* { dg-error {passing 8 to argument 3 of 'svextq', which expects a value in the range \[0, 7\]} } */
+
+ svextq (f16, f16, -1); /* { dg-error {passing -1 to argument 3 of 'svextq', which expects a value in the range \[0, 7\]} } */
+ svextq (f16, f16, 0);
+ svextq (f16, f16, 7);
+ svextq (f16, f16, 8); /* { dg-error {passing 8 to argument 3 of 'svextq', which expects a value in the range \[0, 7\]} } */
+
+ svextq (s32, s32, -1); /* { dg-error {passing -1 to argument 3 of 'svextq', which expects a value in the range \[0, 3\]} } */
+ svextq (s32, s32, 0);
+ svextq (s32, s32, 3);
+ svextq (s32, s32, 4); /* { dg-error {passing 4 to argument 3 of 'svextq', which expects a value in the range \[0, 3\]} } */
+
+ svextq (u32, u32, -1); /* { dg-error {passing -1 to argument 3 of 'svextq', which expects a value in the range \[0, 3\]} } */
+ svextq (u32, u32, 0);
+ svextq (u32, u32, 3);
+ svextq (u32, u32, 4); /* { dg-error {passing 4 to argument 3 of 'svextq', which expects a value in the range \[0, 3\]} } */
+
+ svextq (f32, f32, -1); /* { dg-error {passing -1 to argument 3 of 'svextq', which expects a value in the range \[0, 3\]} } */
+ svextq (f32, f32, 0);
+ svextq (f32, f32, 3);
+ svextq (f32, f32, 4); /* { dg-error {passing 4 to argument 3 of 'svextq', which expects a value in the range \[0, 3\]} } */
+
+ svextq (s64, s64, -1); /* { dg-error {passing -1 to argument 3 of 'svextq', which expects a value in the range \[0, 1\]} } */
+ svextq (s64, s64, 0);
+ svextq (s64, s64, 1);
+ svextq (s64, s64, 2); /* { dg-error {passing 2 to argument 3 of 'svextq', which expects a value in the range \[0, 1\]} } */
+
+ svextq (u64, u64, -1); /* { dg-error {passing -1 to argument 3 of 'svextq', which expects a value in the range \[0, 1\]} } */
+ svextq (u64, u64, 0);
+ svextq (u64, u64, 1);
+ svextq (u64, u64, 2); /* { dg-error {passing 2 to argument 3 of 'svextq', which expects a value in the range \[0, 1\]} } */
+
+ svextq (f64, f64, -1); /* { dg-error {passing -1 to argument 3 of 'svextq', which expects a value in the range \[0, 1\]} } */
+ svextq (f64, f64, 0);
+ svextq (f64, f64, 1);
+ svextq (f64, f64, 2); /* { dg-error {passing 2 to argument 3 of 'svextq', which expects a value in the range \[0, 1\]} } */
+}
+
+#pragma GCC target "+nosve2p1"
+
+void
+f2 (svint8_t s8)
+{
+ svextq (s8, s8, 0); /* { dg-error {ACLE function 'svextq_s8' requires ISA extension 'sve2p1'} } */
+}
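
The valid calls above correspond to ordinary uses of the new intrinsic; a minimal compile-only sketch (function names are illustrative) shows how the immediate range tracks the element width:

/* svextq extracts elements starting at the immediate from the first operand
   and fills the rest of each 128-bit quadword from the second operand, so
   the immediate range is [0, 15] for bytes, [0, 7] for halfwords, [0, 3]
   for words and [0, 1] for doublewords.  */
#include <arm_sve.h>

#pragma GCC target "+sve2p1"

svuint8_t
ext_bytes_within_quadwords (svuint8_t a, svuint8_t b)
{
  return svextq (a, b, 1);	/* byte elements: immediate in [0, 15] */
}

svfloat64_t
ext_doublewords_within_quadwords (svfloat64_t a, svfloat64_t b)
{
  return svextq (a, b, 1);	/* doubleword elements: immediate in [0, 1] */
}
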
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ld1sh_gather_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ld1sh_gather_1.c
deleted file mode 100644
index c9f49b6..0000000
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/ld1sh_gather_1.c
+++ /dev/null
@@ -1,35 +0,0 @@
-/* { dg-do compile } */
-/* { dg-options "-std=c99 -Wpointer-sign" } */
-
-#include <arm_sve.h>
-
-struct s { int i; };
-
-void
-f1 (svbool_t pg, short *s16_ptr, unsigned short *u16_ptr,
- svint8_t s8, svint16_t s16,
- svint32_t s32, svuint32_t u32, svfloat32_t f32,
- svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
-{
- svld1sh_gather_index (pg, s16_ptr, s32); /* { dg-error {implicit declaration of function 'svld1sh_gather_index'; did you mean 'svld1_gather_index'} } */
- svld1sh_gather_index_u32 (pg, s16_ptr); /* { dg-error {too few arguments to function 'svld1sh_gather_index_u32'} } */
- svld1sh_gather_index_u32 (pg, s16_ptr, s32, 0); /* { dg-error {too many arguments to function 'svld1sh_gather_index_u32'} } */
- svld1sh_gather_index_u32 (pg, u16_ptr, s32); /* { dg-warning {pointer targets in passing argument 2 of 'svld1sh_gather_s32index_u32' differ in signedness} } */
- svld1sh_gather_index_u32 (pg, s16_ptr, pg); /* { dg-error {passing 'svbool_t' to argument 3 of 'svld1sh_gather_index_u32', which expects a vector of 32-bit integers} } */
- svld1sh_gather_index_u32 (pg, s16_ptr, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svld1sh_gather_index_u32', which expects a vector of 32-bit integers} } */
- svld1sh_gather_index_u32 (pg, s16_ptr, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svld1sh_gather_index_u32', which expects a vector of 32-bit integers} } */
- svld1sh_gather_index_u32 (pg, s16_ptr, s32);
- svld1sh_gather_index_u32 (pg, s16_ptr, u32);
- svld1sh_gather_index_u32 (pg, s16_ptr, f32); /* { dg-error {passing 'svfloat32_t' to argument 3 of 'svld1sh_gather_index_u32', which expects a vector of 32-bit integers} } */
- svld1sh_gather_index_u32 (pg, s16_ptr, s64); /* { dg-error {passing 'svint64_t' to argument 3 of 'svld1sh_gather_index_u32', which expects a vector of 32-bit integers} } */
- svld1sh_gather_index_u32 (pg, s16_ptr, u64); /* { dg-error {passing 'svuint64_t' to argument 3 of 'svld1sh_gather_index_u32', which expects a vector of 32-bit integers} } */
- svld1sh_gather_index_u32 (pg, s16_ptr, f64); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svld1sh_gather_index_u32', which expects a vector of 32-bit integers} } */
-
- svld1sh_gather_index_u32 (pg, 0, s32);
- svld1sh_gather_index_u32 (pg, s, s32); /* { dg-error {'struct s' to argument 2 of 'svld1sh_gather_index_u32', which expects a vector or pointer base address} } */
-
- svld1sh_gather_index_u32 (pg, pg, 0); /* { dg-error {passing 'svbool_t' to argument 2 of 'svld1sh_gather_index_u32', which expects 'svuint32_t'} } */
- svld1sh_gather_index_u32 (pg, s32, 0); /* { dg-error {passing 'svint32_t' to argument 2 of 'svld1sh_gather_index_u32', which expects 'svuint32_t'} } */
- svld1sh_gather_index_u32 (pg, u32, 0);
- svld1sh_gather_index_u32 (pg, u64, 0); /* { dg-error {passing 'svuint64_t' to argument 2 of 'svld1sh_gather_index_u32', which expects 'svuint32_t'} } */
-}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_1.c
index c9f49b6..bb2e1f1 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_index_1.c
@@ -11,7 +11,7 @@ f1 (svbool_t pg, short *s16_ptr, unsigned short *u16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svld1sh_gather_index (pg, s16_ptr, s32); /* { dg-error {implicit declaration of function 'svld1sh_gather_index'; did you mean 'svld1_gather_index'} } */
+ svld1sh_gather_index (pg, s16_ptr, s32); /* { dg-error {implicit declaration of function 'svld1sh_gather_index'; did you mean 'svld1q?_gather_index'} } */
svld1sh_gather_index_u32 (pg, s16_ptr); /* { dg-error {too few arguments to function 'svld1sh_gather_index_u32'} } */
svld1sh_gather_index_u32 (pg, s16_ptr, s32, 0); /* { dg-error {too many arguments to function 'svld1sh_gather_index_u32'} } */
svld1sh_gather_index_u32 (pg, u16_ptr, s32); /* { dg-warning {pointer targets in passing argument 2 of 'svld1sh_gather_s32index_u32' differ in signedness} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_1.c
index 41bf2da..e40fd05 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_1.c
@@ -11,7 +11,7 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svld1sb_gather_offset (pg, s8_ptr, s32); /* { dg-error {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1_gather_offset'} } */
+ svld1sb_gather_offset (pg, s8_ptr, s32); /* { dg-error {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1q?_gather_offset'} } */
svld1sb_gather_offset_s32 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svld1sb_gather_offset_s32'} } */
svld1sb_gather_offset_s32 (pg, s8_ptr, s32, 0); /* { dg-error {too many arguments to function 'svld1sb_gather_offset_s32'} } */
svld1sb_gather_offset_s32 (pg, s16_ptr, s32); /* { dg-error {passing argument 2 of 'svld1sb_gather_s32offset_s32' from incompatible pointer type} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_2.c
index 1261b49..e60bdf5 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_2.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_2.c
@@ -11,7 +11,7 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svld1sb_gather_offset (pg, s8_ptr, s32); /* { dg-error {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1_gather_offset'} } */
+ svld1sb_gather_offset (pg, s8_ptr, s32); /* { dg-error {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1q?_gather_offset'} } */
svld1sb_gather_offset_u32 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svld1sb_gather_offset_u32'} } */
svld1sb_gather_offset_u32 (pg, s8_ptr, s32, 0); /* { dg-error {too many arguments to function 'svld1sb_gather_offset_u32'} } */
svld1sb_gather_offset_u32 (pg, s16_ptr, s32); /* { dg-error {passing argument 2 of 'svld1sb_gather_s32offset_u32' from incompatible pointer type} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_3.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_3.c
index 518348d..9c99425 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_3.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_3.c
@@ -11,7 +11,7 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svld1sb_gather_offset (pg, s8_ptr, s64); /* { dg-error {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1_gather_offset'} } */
+ svld1sb_gather_offset (pg, s8_ptr, s64); /* { dg-error {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1q?_gather_offset'} } */
svld1sb_gather_offset_s64 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svld1sb_gather_offset_s64'} } */
svld1sb_gather_offset_s64 (pg, s8_ptr, s64, 0); /* { dg-error {too many arguments to function 'svld1sb_gather_offset_s64'} } */
svld1sb_gather_offset_s64 (pg, s16_ptr, s64); /* { dg-error {passing argument 2 of 'svld1sb_gather_s64offset_s64' from incompatible pointer type} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_4.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_4.c
index 6086911..0a2beae 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_4.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_4.c
@@ -11,7 +11,7 @@ f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svld1sb_gather_offset (pg, s8_ptr, s64); /* { dg-error {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1_gather_offset'} } */
+ svld1sb_gather_offset (pg, s8_ptr, s64); /* { dg-error {implicit declaration of function 'svld1sb_gather_offset'; did you mean 'svld1q?_gather_offset'} } */
svld1sb_gather_offset_u64 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svld1sb_gather_offset_u64'} } */
svld1sb_gather_offset_u64 (pg, s8_ptr, s64, 0); /* { dg-error {too many arguments to function 'svld1sb_gather_offset_u64'} } */
svld1sb_gather_offset_u64 (pg, s16_ptr, s64); /* { dg-error {passing argument 2 of 'svld1sb_gather_s64offset_u64' from incompatible pointer type} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_5.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_5.c
index 9e2ccee..7c42a85 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_5.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_ext_gather_offset_5.c
@@ -11,7 +11,7 @@ f1 (svbool_t pg, unsigned char *s8_ptr, unsigned short *s16_ptr,
svint32_t s32, svuint32_t u32, svfloat32_t f32,
svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
{
- svld1ub_gather_offset (pg, s8_ptr, s32); /* { dg-error {implicit declaration of function 'svld1ub_gather_offset'; did you mean 'svld1_gather_offset'} } */
+ svld1ub_gather_offset (pg, s8_ptr, s32); /* { dg-error {implicit declaration of function 'svld1ub_gather_offset'; did you mean 'svld1q?_gather_offset'} } */
svld1ub_gather_offset_s32 (pg, s8_ptr); /* { dg-error {too few arguments to function 'svld1ub_gather_offset_s32'} } */
svld1ub_gather_offset_s32 (pg, s8_ptr, s32, 0); /* { dg-error {too many arguments to function 'svld1ub_gather_offset_s32'} } */
svld1ub_gather_offset_s32 (pg, s16_ptr, s32); /* { dg-error {passing argument 2 of 'svld1ub_gather_s32offset_s32' from incompatible pointer type} } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_gather64_sv_index_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_gather64_sv_index_1.c
new file mode 100644
index 0000000..5497b69
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_gather64_sv_index_1.c
@@ -0,0 +1,57 @@
+/* { dg-do compile } */
+/* { dg-options "-std=c99" } */
+
+#include <arm_sve.h>
+
+#pragma GCC target "+nosve2p1"
+
+void
+f1 (svbool_t pg, int16_t *s16_ptr, svuint64_t u64)
+{
+ svld1q_gather_index (svptrue_b8 (), s16_ptr, u64); /* { dg-error {ACLE function 'svld1q_gather_u64index_s16' requires ISA extension 'sve2p1'} } */
+}
+
+#pragma GCC target "+sve2p1"
+
+struct s { signed char x; };
+
+svuint32_t
+f2 (svbool_t pg, int8_t *s8_ptr, int16_t *s16_ptr,
+ int32_t *s32_ptr, uint32_t *u32_ptr, float *f32_ptr,
+ int64_t *s64_ptr, uint64_t *u64_ptr, double *f64_ptr,
+ void *void_ptr, struct s *s_ptr, _Complex float *cf32_ptr, int **ptr_ptr,
+ svint8_t s8, svint16_t s16,
+ svint32_t s32, svuint32_t u32, svfloat32_t f32,
+ svint64_t s64, svuint64_t u64, svfloat64_t f64)
+{
+ svld1q_gather_index (pg, s32_ptr); /* { dg-error {too few arguments to function 'svld1q_gather_index'} } */
+ svld1q_gather_index (pg, s32_ptr, s64, 0); /* { dg-error {too many arguments to function 'svld1q_gather_index'} } */
+ svld1q_gather_index (0, s32_ptr, s64); /* { dg-error {passing 'int' to argument 1 of 'svld1q_gather_index', which expects 'svbool_t'} } */
+ svld1q_gather_index (pg, 0, s64); /* { dg-error {passing 'int' to argument 2 of 'svld1q_gather_index', which expects a pointer type} } */
+ svld1q_gather_index (pg, (int32_t *) 0, s64);
+ svld1q_gather_index (pg, void_ptr, s64); /* { dg-error {passing 'void \*' to argument 2 of 'svld1q_gather_index', but 'void' is not a valid SVE element type} } */
+ svld1q_gather_index (pg, s_ptr, s64); /* { dg-error {passing 'struct s \*' to argument 2 of 'svld1q_gather_index', but 'struct s' is not a valid SVE element type} } */
+ svld1q_gather_index (pg, f32_ptr, s64);
+ svld1q_gather_index (pg, cf32_ptr, s64); /* { dg-error {passing '_Complex float \*' to argument 2 of 'svld1q_gather_index', but 'complex float' is not a valid SVE element type} } */
+ svld1q_gather_index (pg, ptr_ptr, u64); /* { dg-error {passing 'int \*\*' to argument 2 of 'svld1q_gather_index', but 'int \*' is not a valid SVE element type} } */
+ svld1q_gather_index (pg, u32, 0); /* { dg-error {passing 'svuint32_t' to argument 2 of 'svld1q_gather_index', which expects a pointer type} } */
+ svld1q_gather_index (pg, u64, 0); /* { dg-error {passing 'svuint64_t' to argument 2 of 'svld1q_gather_index', which expects a pointer type} } */
+
+ svld1q_gather_index (pg, s8_ptr, s64); /* { dg-error {passing 'int8_t \*'[^\n]* to argument 2 of 'svld1q_gather_index', which expects the data to be 16 bits or wider} } */
+ /* { dg-message {use the 'offset' rather than 'index' form for 8-bit data} "" { target *-*-* } .-1 } */
+
+ svld1q_gather_index (pg, s32_ptr, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svld1q_gather_index', which expects a vector of 64-bit integers} } */
+ svld1q_gather_index (pg, s16_ptr, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svld1q_gather_index', which expects a vector of 64-bit integers} } */
+ svld1q_gather_index (pg, s32_ptr, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svld1q_gather_index', which expects a vector of 64-bit integers} } */
+ svld1q_gather_index (pg, u32_ptr, s64);
+
+ svld1q_gather_index (pg, s32_ptr, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svld1q_gather_index', which expects a vector of 64-bit integers} } */
+ svld1q_gather_index (pg, s32_ptr, u32); /* { dg-error {passing 'svuint32_t' to argument 3 of 'svld1q_gather_index', which expects a vector of 64-bit integers} } */
+ svld1q_gather_index (pg, f32_ptr, f32); /* { dg-error {passing 'svfloat32_t' to argument 3 of 'svld1q_gather_index', which expects a vector of 64-bit integers} } */
+ svld1q_gather_index (pg, s32_ptr, s64);
+ svld1q_gather_index (pg, u64_ptr, s64);
+ svld1q_gather_index (pg, s64_ptr, u64);
+ svld1q_gather_index (pg, f64_ptr, f64); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svld1q_gather_index', which expects a vector of 64-bit integers} } */
+
+ return svld1q_gather_index (pg, s32_ptr, s64); /* { dg-error {incompatible types when returning type 'svint32_t' but 'svuint32_t' was expected} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_gather64_sv_offset_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_gather64_sv_offset_1.c
new file mode 100644
index 0000000..87ceed1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/load_gather64_sv_offset_1.c
@@ -0,0 +1,54 @@
+/* { dg-do compile } */
+/* { dg-options "-std=c99" } */
+
+#include <arm_sve.h>
+
+#pragma GCC target "+nosve2p1"
+
+void
+f1 (svbool_t pg, int8_t *s8_ptr, svuint64_t u64)
+{
+ svld1q_gather_offset (svptrue_b8 (), s8_ptr, u64); /* { dg-error {ACLE function 'svld1q_gather_u64offset_s8' requires ISA extension 'sve2p1'} } */
+}
+
+#pragma GCC target "+sve2p1"
+
+struct s { signed char x; };
+
+svuint32_t
+f2 (svbool_t pg, int8_t *s8_ptr, int16_t *s16_ptr,
+ int32_t *s32_ptr, float *f32_ptr,
+ int64_t *s64_ptr, uint64_t *u64_ptr, double *f64_ptr,
+ void *void_ptr, struct s *s_ptr, _Complex float *cf32_ptr, int **ptr_ptr,
+ svint8_t s8, svint16_t s16,
+ svint32_t s32, svuint32_t u32, svfloat32_t f32,
+ svint64_t s64, svuint64_t u64, svfloat64_t f64)
+{
+ svld1q_gather_offset (pg, s32_ptr); /* { dg-error {too few arguments to function 'svld1q_gather_offset'} } */
+ svld1q_gather_offset (pg, s32_ptr, s64, 0); /* { dg-error {too many arguments to function 'svld1q_gather_offset'} } */
+ svld1q_gather_offset (0, s32_ptr, s64); /* { dg-error {passing 'int' to argument 1 of 'svld1q_gather_offset', which expects 'svbool_t'} } */
+ svld1q_gather_offset (pg, 0, s64); /* { dg-error {passing 'int' to argument 2 of 'svld1q_gather_offset', which expects a pointer type} } */
+ svld1q_gather_offset (pg, (int32_t *) 0, s64);
+ svld1q_gather_offset (pg, void_ptr, s64); /* { dg-error {passing 'void \*' to argument 2 of 'svld1q_gather_offset', but 'void' is not a valid SVE element type} } */
+ svld1q_gather_offset (pg, s_ptr, s64); /* { dg-error {passing 'struct s \*' to argument 2 of 'svld1q_gather_offset', but 'struct s' is not a valid SVE element type} } */
+ svld1q_gather_offset (pg, f32_ptr, s64);
+ svld1q_gather_offset (pg, cf32_ptr, s64); /* { dg-error {passing '_Complex float \*' to argument 2 of 'svld1q_gather_offset', but 'complex float' is not a valid SVE element type} } */
+ svld1q_gather_offset (pg, ptr_ptr, u64); /* { dg-error {passing 'int \*\*' to argument 2 of 'svld1q_gather_offset', but 'int \*' is not a valid SVE element type} } */
+ svld1q_gather_offset (pg, u32, 0); /* { dg-error {passing 'svuint32_t' to argument 2 of 'svld1q_gather_offset', which expects a pointer type} } */
+ svld1q_gather_offset (pg, u64, 0); /* { dg-error {passing 'svuint64_t' to argument 2 of 'svld1q_gather_offset', which expects a pointer type} } */
+
+ svld1q_gather_offset (pg, s8_ptr, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svld1q_gather_offset', which expects a vector of 64-bit integers} } */
+ svld1q_gather_offset (pg, s16_ptr, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svld1q_gather_offset', which expects a vector of 64-bit integers} } */
+ svld1q_gather_offset (pg, s8_ptr, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svld1q_gather_offset', which expects a vector of 64-bit integers} } */
+ svld1q_gather_offset (pg, s8_ptr, s64);
+
+ svld1q_gather_offset (pg, s32_ptr, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svld1q_gather_offset', which expects a vector of 64-bit integers} } */
+ svld1q_gather_offset (pg, s32_ptr, u32); /* { dg-error {passing 'svuint32_t' to argument 3 of 'svld1q_gather_offset', which expects a vector of 64-bit integers} } */
+ svld1q_gather_offset (pg, f32_ptr, f32); /* { dg-error {passing 'svfloat32_t' to argument 3 of 'svld1q_gather_offset', which expects a vector of 64-bit integers} } */
+ svld1q_gather_offset (pg, s32_ptr, s64);
+ svld1q_gather_offset (pg, u64_ptr, s64);
+ svld1q_gather_offset (pg, s64_ptr, u64);
+ svld1q_gather_offset (pg, f64_ptr, f64); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svld1q_gather_offset', which expects a vector of 64-bit integers} } */
+
+ return svld1q_gather_offset (pg, s32_ptr, s64); /* { dg-error {incompatible types when returning type 'svint32_t' but 'svuint32_t' was expected} } */
+}
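
Both gather forms tested above can be used as in the following sketch (illustrative names; the index form scales each 64-bit index by the element size, while the offset form takes raw byte offsets):

/* Compile-only sketch of the svld1q_gather calls the tests accept.  */
#include <arm_sve.h>

#pragma GCC target "+sve2p1"

svint32_t
gather_words_by_index (svbool_t pg, const int32_t *base, svint64_t index)
{
  return svld1q_gather_index (pg, base, index);
}

svint8_t
gather_bytes_by_offset (svbool_t pg, const int8_t *base, svint64_t offset)
{
  return svld1q_gather_offset (pg, base, offset);
}
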
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_from_vector_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_from_vector_1.c
new file mode 100644
index 0000000..8f47ee4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_from_vector_1.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+
+#include <arm_sve.h>
+
+void
+f1 (svint32_t s32)
+{
+ svpmov (s32); /* { dg-error {ACLE function 'svpmov_s32' requires ISA extension 'sve2p1'} } */
+}
+
+#pragma GCC target "+sve2p1"
+
+void
+f2 (svbool_t pg, svint8_t s8, svint32_t s32, svuint32_t u32, svfloat32_t f32,
+ svuint32x2_t u32x2)
+{
+ svpmov (); /* { dg-error {too few arguments to function 'svpmov'} } */
+ svpmov (s8, s8); /* { dg-error {too many arguments to function 'svpmov'} } */
+
+ svpmov (0); /* { dg-error {passing 'int' to argument 1 of 'svpmov', which expects an SVE type rather than a scalar type} } */
+ svpmov (pg); /* { dg-error {'svpmov' has no form that takes 'svbool_t' arguments} } */
+ svpmov (s32);
+ svpmov (u32);
+ svpmov (f32); /* { dg-error {'svpmov' has no form that takes 'svfloat32_t' arguments} } */
+ svpmov (u32x2); /* { dg-error {passing 'svuint32x2_t' to argument 1 of 'svpmov', which expects a single SVE vector rather than a tuple} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_from_vector_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_from_vector_lane_1.c
new file mode 100644
index 0000000..0f129ae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_from_vector_lane_1.c
@@ -0,0 +1,41 @@
+/* { dg-do compile } */
+
+#include <arm_sve.h>
+
+void
+f1 (svint32_t s32)
+{
+ svpmov_lane (s32, 0); /* { dg-error {ACLE function 'svpmov_lane_s32' requires ISA extension 'sve2p1'} } */
+}
+
+#pragma GCC target "+sve2p1"
+
+void
+f2 (svbool_t pg, svint8_t s8, svuint16_t u16, svint32_t s32, svuint32_t u32,
+ svfloat32_t f32, svuint64_t u64, svuint32x2_t u32x2, int x)
+{
+ svpmov_lane (s8); /* { dg-error {too few arguments to function 'svpmov_lane'} } */
+ svpmov_lane (s8, 0, 0); /* { dg-error {too many arguments to function 'svpmov_lane'} } */
+
+ svpmov_lane (0, 0); /* { dg-error {passing 'int' to argument 1 of 'svpmov_lane', which expects an SVE type rather than a scalar type} } */
+ svpmov_lane (pg, 0); /* { dg-error {'svpmov_lane' has no form that takes 'svbool_t' arguments} } */
+ svpmov_lane (s32, 0);
+ svpmov_lane (u32, 0);
+ svpmov_lane (f32, 0); /* { dg-error {'svpmov_lane' has no form that takes 'svfloat32_t' arguments} } */
+ svpmov_lane (u32x2, 0); /* { dg-error {passing 'svuint32x2_t' to argument 1 of 'svpmov_lane', which expects a single SVE vector rather than a tuple} } */
+
+ svpmov_lane (s8, s8); /* { dg-error {argument 2 of 'svpmov_lane' must be an integer constant expression} } */
+ svpmov_lane (s8, pg); /* { dg-error {argument 2 of 'svpmov_lane' must be an integer constant expression} } */
+ svpmov_lane (s8, x); /* { dg-error {argument 2 of 'svpmov_lane' must be an integer constant expression} } */
+ svpmov_lane (s8, -1); /* { dg-error {passing -1 to argument 2 of 'svpmov_lane', which expects the value 0} } */
+ svpmov_lane (s8, 1); /* { dg-error {passing 1 to argument 2 of 'svpmov_lane', which expects the value 0} } */
+
+ svpmov_lane (u16, -1); /* { dg-error {passing -1 to argument 2 of 'svpmov_lane', which expects a value in the range \[0, 1\]} } */
+ svpmov_lane (u16, 2); /* { dg-error {passing 2 to argument 2 of 'svpmov_lane', which expects a value in the range \[0, 1\]} } */
+
+ svpmov_lane (s32, -1); /* { dg-error {passing -1 to argument 2 of 'svpmov_lane', which expects a value in the range \[0, 3\]} } */
+ svpmov_lane (s32, 4); /* { dg-error {passing 4 to argument 2 of 'svpmov_lane', which expects a value in the range \[0, 3\]} } */
+
+ svpmov_lane (u64, -1); /* { dg-error {passing -1 to argument 2 of 'svpmov_lane', which expects a value in the range \[0, 7\]} } */
+ svpmov_lane (u64, 8); /* { dg-error {passing 8 to argument 2 of 'svpmov_lane', which expects a value in the range \[0, 7\]} } */
+}
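
As a usage sketch of the from-vector forms accepted above (assuming, as the shape name suggests, that both return an svbool_t predicate):

/* svpmov takes just the vector, while svpmov_lane also takes a constant
   selecting which part of the vector supplies the predicate bits; the
   valid range grows with the element width, matching the ranges checked
   above.  Result type assumed to be svbool_t.  */
#include <arm_sve.h>

#pragma GCC target "+sve2p1"

svbool_t
predicate_from_vector (svuint32_t x)
{
  return svpmov (x);
}

svbool_t
predicate_from_lane (svuint64_t x)
{
  return svpmov_lane (x, 3);	/* 64-bit elements: lane in [0, 7] */
}
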
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_to_vector_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_to_vector_lane_1.c
new file mode 100644
index 0000000..d80cb95
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_to_vector_lane_1.c
@@ -0,0 +1,45 @@
+/* { dg-do compile } */
+
+#include <arm_sve.h>
+
+void
+f1 (svint32_t s32, svbool_t pg)
+{
+ svpmov_lane_m (s32, pg, 1); /* { dg-error {ACLE function 'svpmov_lane_s32_m' requires ISA extension 'sve2p1'} } */
+}
+
+#pragma GCC target "+sve2p1"
+
+void
+f2 (svbool_t pg, svint8_t s8, svuint8_t u8, svuint16_t u16, svint32_t s32,
+ svuint32_t u32, svfloat32_t f32, svuint64_t u64, svuint32x2_t u32x2, int x)
+{
+ svpmov_lane_m (u16, pg); /* { dg-error {too few arguments to function 'svpmov_lane_m'} } */
+ svpmov_lane_m (u16, pg, 1, 0); /* { dg-error {too many arguments to function 'svpmov_lane_m'} } */
+
+ svpmov_lane_m (0, pg, 1); /* { dg-error {passing 'int' to argument 1 of 'svpmov_lane_m', which expects an SVE type rather than a scalar type} } */
+ svpmov_lane_m (pg, pg, 1); /* { dg-error {'svpmov_lane_m' has no form that takes 'svbool_t' arguments} } */
+ svpmov_lane_m (s32, pg, 1);
+ svpmov_lane_m (u32, pg, 1);
+ svpmov_lane_m (f32, pg, 1); /* { dg-error {'svpmov_lane_m' has no form that takes 'svfloat32_t' arguments} } */
+ svpmov_lane_m (u32x2, pg, 1); /* { dg-error {passing 'svuint32x2_t' to argument 1 of 'svpmov_lane_m', which expects a single SVE vector rather than a tuple} } */
+
+ svpmov_lane_m (u16, u16, 1); /* { dg-error {passing 'svuint16_t' to argument 2 of 'svpmov_lane_m', which expects 'svbool_t'} } */
+ svpmov_lane_m (u16, x, 1); /* { dg-error {passing 'int' to argument 2 of 'svpmov_lane_m', which expects 'svbool_t'} } */
+ svpmov_lane_m (u16, u32x2, 1); /* { dg-error {passing 'svuint32x2_t' to argument 2 of 'svpmov_lane_m', which expects 'svbool_t'} } */
+
+ svpmov_lane_m (s8, pg, 0); /* { dg-error {'svpmov_lane_m' has no form that takes 'svint8_t' arguments} } */
+ svpmov_lane_m (u8, pg, 0); /* { dg-error {'svpmov_lane_m' has no form that takes 'svuint8_t' arguments} } */
+
+ svpmov_lane_m (u16, pg, u16); /* { dg-error {argument 3 of 'svpmov_lane_m' must be an integer constant expression} } */
+ svpmov_lane_m (u16, pg, pg); /* { dg-error {argument 3 of 'svpmov_lane_m' must be an integer constant expression} } */
+ svpmov_lane_m (u16, pg, x); /* { dg-error {argument 3 of 'svpmov_lane_m' must be an integer constant expression} } */
+ svpmov_lane_m (u16, pg, 0); /* { dg-error {passing 0 to argument 3 of 'svpmov_lane_m', which expects the value 1} } */
+ svpmov_lane_m (u16, pg, 2); /* { dg-error {passing 2 to argument 3 of 'svpmov_lane_m', which expects the value 1} } */
+
+ svpmov_lane_m (s32, pg, 0); /* { dg-error {passing 0 to argument 3 of 'svpmov_lane_m', which expects a value in the range \[1, 3\]} } */
+ svpmov_lane_m (s32, pg, 4); /* { dg-error {passing 4 to argument 3 of 'svpmov_lane_m', which expects a value in the range \[1, 3\]} } */
+
+ svpmov_lane_m (u64, pg, 0); /* { dg-error {passing 0 to argument 3 of 'svpmov_lane_m', which expects a value in the range \[1, 7\]} } */
+ svpmov_lane_m (u64, pg, 8); /* { dg-error {passing 8 to argument 3 of 'svpmov_lane_m', which expects a value in the range \[1, 7\]} } */
+}
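
A corresponding sketch for the merging to-vector form above (assuming the result has the same vector type as the first argument):

/* svpmov_lane_m inserts predicate bits into the selected part of the
   vector and leaves the other lanes unchanged (_m = merging); the
   immediate must be nonzero, matching the ranges checked above.
   Result type assumed to match argument 1.  */
#include <arm_sve.h>

#pragma GCC target "+sve2p1"

svuint32_t
insert_predicate_part1 (svuint32_t acc, svbool_t pg)
{
  return svpmov_lane_m (acc, pg, 1);	/* 32-bit elements: part in [1, 3] */
}
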
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_to_vector_lane_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_to_vector_lane_2.c
new file mode 100644
index 0000000..1ef6b26
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/pmov_to_vector_lane_2.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+
+#include <arm_sve.h>
+
+#pragma GCC target "+sve2p1"
+
+void
+f1 (svbool_t pg, svint16_t s16, svuint32_t u32, svint64_t s64, int x)
+{
+ svpmov_lane_s16_m (s16, pg, x); /* { dg-error {argument 3 of 'svpmov_lane_s16_m' must be an integer constant expression} } */
+ svpmov_lane_s16_m (s16, pg, 0); /* { dg-error {passing 0 to argument 3 of 'svpmov_lane_s16_m', which expects the value 1} } */
+ svpmov_lane_s16_m (s16, pg, 2); /* { dg-error {passing 2 to argument 3 of 'svpmov_lane_s16_m', which expects the value 1} } */
+
+ svpmov_lane_u32_m (u32, pg, 0); /* { dg-error {passing 0 to argument 3 of 'svpmov_lane_u32_m', which expects a value in the range \[1, 3\]} } */
+ svpmov_lane_u32_m (u32, pg, 4); /* { dg-error {passing 4 to argument 3 of 'svpmov_lane_u32_m', which expects a value in the range \[1, 3\]} } */
+
+ svpmov_lane_s64_m (s64, pg, 0); /* { dg-error {passing 0 to argument 3 of 'svpmov_lane_s64_m', which expects a value in the range \[1, 7\]} } */
+ svpmov_lane_s64_m (s64, pg, 8); /* { dg-error {passing 8 to argument 3 of 'svpmov_lane_s64_m', which expects a value in the range \[1, 7\]} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter64_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter64_1.c
new file mode 100644
index 0000000..70dac58
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter64_1.c
@@ -0,0 +1,32 @@
+/* { dg-do compile } */
+/* { dg-options "-std=c99" } */
+
+#include <arm_sve.h>
+
+#pragma GCC target ("+sve2p1")
+
+struct s { signed char x; };
+
+svuint32_t
+f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
+ int32_t *s32_ptr, float *f32_ptr, void *void_ptr, struct s *s_ptr,
+ _Complex float *cf32_ptr,
+ svint8_t s8, svint16_t s16, svuint16_t u16,
+ svint32_t s32, svuint32_t u32, svfloat32_t f32,
+ svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
+{
+ svst1q_scatter (pg, u64); /* { dg-error {too few arguments to function 'svst1q_scatter'} } */
+ svst1q_scatter (pg, u64, s8, 0); /* { dg-error {too many arguments to function 'svst1q_scatter'} } */
+ svst1q_scatter (0, u64, s8); /* { dg-error {passing 'int' to argument 1 of 'svst1q_scatter', which expects 'svbool_t'} } */
+ svst1q_scatter (pg, 0, s32); /* { dg-error {passing 'int' to argument 2 of 'svst1q_scatter', which expects an SVE type rather than a scalar type} } */
+ svst1q_scatter (pg, void_ptr, s32); /* { dg-error {passing 'void \*' to argument 2 of 'svst1q_scatter', which expects an SVE type rather than a scalar type} } */
+ svst1q_scatter (pg, u32, s8); /* { dg-error {passing 'svuint32_t' to argument 2 of 'svst1q_scatter', which expects 'svuint64_t'} } */
+ svst1q_scatter (pg, s32, s16); /* { dg-error {passing 'svint32_t' to argument 2 of 'svst1q_scatter', which expects 'svuint64_t'} } */
+ svst1q_scatter (pg, u64, s32);
+ svst1q_scatter (pg, s64, s64); /* { dg-error {passing 'svint64_t' to argument 2 of 'svst1q_scatter', which expects 'svuint64_t'} } */
+ svst1q_scatter (pg, f64, u64); /* { dg-error {passing 'svfloat64_t' to argument 2 of 'svst1q_scatter', which expects 'svuint64_t'} } */
+
+ svst1q_scatter (pg, u64, 0); /* { dg-error {passing 'int' to argument 3 of 'svst1q_scatter', which expects an SVE type rather than a scalar type} } */
+ svst1q_scatter (pg, u64, pg); /* { dg-error {'svst1q_scatter' has no form that takes 'svbool_t' arguments} } */
+ svst1q_scatter (pg, u64, s); /* { dg-error {passing 'struct s' to argument 3 of 'svst1q_scatter', which expects an SVE type} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter64_index_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter64_index_1.c
new file mode 100644
index 0000000..fdfc508
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter64_index_1.c
@@ -0,0 +1,59 @@
+/* { dg-do compile } */
+/* { dg-options "-std=c99" } */
+
+#include <arm_sve.h>
+
+#pragma GCC target ("+sve2p1")
+
+struct s { signed char x; };
+
+svuint32_t
+f1 (svbool_t pg, short *s16_ptr, uint16_t *u16_ptr,
+ int32_t *s32_ptr, float *f32_ptr,
+ void *void_ptr, struct s *s_ptr, _Complex float *cf32_ptr,
+ svint8_t s8, svuint8_t u8, svint16_t s16, svuint16_t u16,
+ svint32_t s32, svuint32_t u32, svfloat32_t f32,
+ svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
+{
+ svst1q_scatter_index (pg, s32_ptr, u64); /* { dg-error {too few arguments to function 'svst1q_scatter_index'} } */
+ svst1q_scatter_index (pg, s32_ptr, u64, s32, 0); /* { dg-error {too many arguments to function 'svst1q_scatter_index'} } */
+ svst1q_scatter_index (0, s32_ptr, u64, s32); /* { dg-error {passing 'int' to argument 1 of 'svst1q_scatter_index', which expects 'svbool_t'} } */
+ svst1q_scatter_index (pg, 0, u64, s32);
+ svst1q_scatter_index (pg, (int32_t *) 0, u64, s32);
+ svst1q_scatter_index (pg, void_ptr, u64, s32);
+ svst1q_scatter_index (pg, s_ptr, u64, s32); /* { dg-error "passing argument 2 of 'svst1q_scatter_u64index_s32' from incompatible pointer type" } */
+ svst1q_scatter_index (pg, f32_ptr, u64, s32); /* { dg-error "passing argument 2 of 'svst1q_scatter_u64index_s32' from incompatible pointer type" } */
+ svst1q_scatter_index (pg, f32_ptr, u64, f32);
+ svst1q_scatter_index (pg, cf32_ptr, u64, f32); /* { dg-error "passing argument 2 of 'svst1q_scatter_u64index_f32' from incompatible pointer type" } */
+ svst1q_scatter_index (pg, s, u64, s32); /* { dg-error {passing 'struct s' to argument 2 of 'svst1q_scatter_index', which expects a vector or pointer base address} } */
+
+ svst1q_scatter_index (pg, u64, void_ptr, s32); /* { dg-error "passing argument 3 of 'svst1q_scatter_u64base_index_s32' makes integer from pointer without a cast" } */
+ svst1q_scatter_index (pg, u64, pg, s32); /* { dg-error {passing 'svbool_t' to argument 3 of 'svst1q_scatter_index', which expects 'int64_t'} } */
+ svst1q_scatter_index (pg, u64, s64, s32); /* { dg-error {passing 'svint64_t' to argument 3 of 'svst1q_scatter_index', which expects 'int64_t'} } */
+
+ svst1q_scatter_index (pg, void_ptr, u64, pg); /* { dg-error {'svst1q_scatter_index' has no form that takes 'svbool_t' arguments} } */
+ svst1q_scatter_index (pg, u64, 0, s8); /* { dg-error {'svst1q_scatter_index' has no form that takes 'svint8_t' arguments} } */
+ svst1q_scatter_index (pg, void_ptr, s64, u8); /* { dg-error {'svst1q_scatter_index' has no form that takes 'svuint8_t' arguments} } */
+
+ svst1q_scatter_index (pg, s32, 0, s32); /* { dg-error {passing 'svint32_t' to argument 2 of 'svst1q_scatter_index', which expects 'svuint64_t'} } */
+
+ svst1q_scatter_index (pg, u32, 0, u16); /* { dg-error {passing 'svuint32_t' to argument 2 of 'svst1q_scatter_index', which expects 'svuint64_t'} } */
+ svst1q_scatter_index (pg, s32, 0, u32); /* { dg-error {passing 'svint32_t' to argument 2 of 'svst1q_scatter_index', which expects 'svuint64_t'} } */
+
+ svst1q_scatter_index (pg, u64, 0, s16);
+ svst1q_scatter_index (pg, s64, 0, u16); /* { dg-error {passing 'svint64_t' to argument 2 of 'svst1q_scatter_index', which expects 'svuint64_t'} } */
+
+ svst1q_scatter_index (pg, u64, 0, u32);
+ svst1q_scatter_index (pg, s64, 0, u64); /* { dg-error {passing 'svint64_t' to argument 2 of 'svst1q_scatter_index', which expects 'svuint64_t'} } */
+
+ svst1q_scatter_index (pg, u64, 0, f64);
+ svst1q_scatter_index (pg, s64, 0, f64); /* { dg-error {passing 'svint64_t' to argument 2 of 'svst1q_scatter_index', which expects 'svuint64_t'} } */
+
+ svst1q_scatter_index (pg, s16_ptr, s8, s16); /* { dg-error {passing 'svint8_t' to argument 3 of 'svst1q_scatter_index', which expects a vector of 64-bit integers} } */
+ svst1q_scatter_index (pg, u16_ptr, s16, u16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svst1q_scatter_index', which expects a vector of 64-bit integers} } */
+ svst1q_scatter_index (pg, s32_ptr, s32, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svst1q_scatter_index', which expects a vector of 64-bit integers} } */
+ svst1q_scatter_index (pg, f32_ptr, f32, s32); /* { dg-error {passing 'svfloat32_t' to argument 3 of 'svst1q_scatter_index', which expects a vector of 64-bit integers} } */
+ svst1q_scatter_index (pg, s32_ptr, s64, s32);
+ svst1q_scatter_index (pg, s32_ptr, u64, s32);
+ svst1q_scatter_index (pg, s32_ptr, f64, s32); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svst1q_scatter_index', which expects a vector of 64-bit integers} } */
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter64_offset_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter64_offset_1.c
new file mode 100644
index 0000000..4cad49d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/store_scatter64_offset_1.c
@@ -0,0 +1,58 @@
+/* { dg-do compile } */
+/* { dg-options "-std=c99" } */
+
+#include <arm_sve.h>
+
+#pragma GCC target ("+sve2p1")
+
+struct s { signed char x; };
+
+svuint32_t
+f1 (svbool_t pg, signed char *s8_ptr, short *s16_ptr,
+ int32_t *s32_ptr, float *f32_ptr, void *void_ptr, struct s *s_ptr,
+ _Complex float *cf32_ptr,
+ svint8_t s8, svint16_t s16, svuint16_t u16,
+ svint32_t s32, svuint32_t u32, svfloat32_t f32,
+ svint64_t s64, svuint64_t u64, svfloat64_t f64, struct s s)
+{
+ svst1q_scatter_offset (pg, s32_ptr, u64); /* { dg-error {too few arguments to function 'svst1q_scatter_offset'} } */
+ svst1q_scatter_offset (pg, s32_ptr, u64, s32, 0); /* { dg-error {too many arguments to function 'svst1q_scatter_offset'} } */
+ svst1q_scatter_offset (0, s32_ptr, u64, s32); /* { dg-error {passing 'int' to argument 1 of 'svst1q_scatter_offset', which expects 'svbool_t'} } */
+ svst1q_scatter_offset (pg, 0, u64, s32);
+ svst1q_scatter_offset (pg, (int32_t *) 0, u64, s32);
+ svst1q_scatter_offset (pg, void_ptr, u64, s32);
+ svst1q_scatter_offset (pg, s_ptr, u64, s32); /* { dg-error "passing argument 2 of 'svst1q_scatter_u64offset_s32' from incompatible pointer type" } */
+ svst1q_scatter_offset (pg, f32_ptr, u64, s32); /* { dg-error "passing argument 2 of 'svst1q_scatter_u64offset_s32' from incompatible pointer type" } */
+ svst1q_scatter_offset (pg, f32_ptr, u64, f32);
+ svst1q_scatter_offset (pg, cf32_ptr, u64, f32); /* { dg-error "passing argument 2 of 'svst1q_scatter_u64offset_f32' from incompatible pointer type" } */
+ svst1q_scatter_offset (pg, s, u64, s32); /* { dg-error {passing 'struct s' to argument 2 of 'svst1q_scatter_offset', which expects a vector or pointer base address} } */
+
+ svst1q_scatter_offset (pg, u64, void_ptr, s32); /* { dg-error "passing argument 3 of 'svst1q_scatter_u64base_offset_s32' makes integer from pointer without a cast" } */
+ svst1q_scatter_offset (pg, u64, pg, s32); /* { dg-error {passing 'svbool_t' to argument 3 of 'svst1q_scatter_offset', which expects 'int64_t'} } */
+ svst1q_scatter_offset (pg, u64, s64, s32); /* { dg-error {passing 'svint64_t' to argument 3 of 'svst1q_scatter_offset', which expects 'int64_t'} } */
+
+ svst1q_scatter_offset (pg, void_ptr, u64, pg); /* { dg-error {'svst1q_scatter_offset' has no form that takes 'svbool_t' arguments} } */
+
+ svst1q_scatter_offset (pg, u32, 0, s8); /* { dg-error {passing 'svuint32_t' to argument 2 of 'svst1q_scatter_offset', which expects 'svuint64_t'} } */
+ svst1q_scatter_offset (pg, s32, 0, s32); /* { dg-error {passing 'svint32_t' to argument 2 of 'svst1q_scatter_offset', which expects 'svuint64_t'} } */
+
+ svst1q_scatter_offset (pg, u32, 0, u16); /* { dg-error {passing 'svuint32_t' to argument 2 of 'svst1q_scatter_offset', which expects 'svuint64_t'} } */
+ svst1q_scatter_offset (pg, s32, 0, u32); /* { dg-error {passing 'svint32_t' to argument 2 of 'svst1q_scatter_offset', which expects 'svuint64_t'} } */
+
+ svst1q_scatter_offset (pg, u64, 0, s8);
+ svst1q_scatter_offset (pg, s64, 0, s16); /* { dg-error {passing 'svint64_t' to argument 2 of 'svst1q_scatter_offset', which expects 'svuint64_t'} } */
+
+ svst1q_scatter_offset (pg, u64, 0, u32);
+ svst1q_scatter_offset (pg, s64, 0, u64); /* { dg-error {passing 'svint64_t' to argument 2 of 'svst1q_scatter_offset', which expects 'svuint64_t'} } */
+
+ svst1q_scatter_offset (pg, u64, 0, f64);
+ svst1q_scatter_offset (pg, s64, 0, f64); /* { dg-error {passing 'svint64_t' to argument 2 of 'svst1q_scatter_offset', which expects 'svuint64_t'} } */
+
+ svst1q_scatter_offset (pg, s8_ptr, s8, s8); /* { dg-error {passing 'svint8_t' to argument 3 of 'svst1q_scatter_offset', which expects a vector of 64-bit integers} } */
+ svst1q_scatter_offset (pg, s16_ptr, s16, s16); /* { dg-error {passing 'svint16_t' to argument 3 of 'svst1q_scatter_offset', which expects a vector of 64-bit integers} } */
+ svst1q_scatter_offset (pg, s32_ptr, s32, s32); /* { dg-error {passing 'svint32_t' to argument 3 of 'svst1q_scatter_offset', which expects a vector of 64-bit integers} } */
+ svst1q_scatter_offset (pg, f32_ptr, f32, s32); /* { dg-error {passing 'svfloat32_t' to argument 3 of 'svst1q_scatter_offset', which expects a vector of 64-bit integers} } */
+ svst1q_scatter_offset (pg, s32_ptr, s64, s32);
+ svst1q_scatter_offset (pg, s32_ptr, u64, s32);
+ svst1q_scatter_offset (pg, s32_ptr, f64, s32); /* { dg-error {passing 'svfloat64_t' to argument 3 of 'svst1q_scatter_offset', which expects a vector of 64-bit integers} } */
+}
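
Both scatter addressing modes exercised above appear in this compile-only sketch (illustrative names; the first form stores through a vector of 64-bit addresses, the second through a scalar base plus a vector of 64-bit indices):

#include <arm_sve.h>

#pragma GCC target "+sve2p1"

void
scatter_to_addresses (svbool_t pg, svuint64_t addresses, svint32_t data)
{
  svst1q_scatter (pg, addresses, data);		/* vector-base form */
}

void
scatter_words_by_index (svbool_t pg, int32_t *base, svuint64_t index,
			svint32_t data)
{
  svst1q_scatter_index (pg, base, index, data);	/* base + scaled index */
}
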
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_lane_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_lane_1.c
new file mode 100644
index 0000000..8a7b35d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/unary_lane_1.c
@@ -0,0 +1,42 @@
+/* { dg-do compile } */
+
+#include <arm_sve.h>
+
+#pragma GCC target "+sve2p1"
+
+void
+f1 (svbool_t pg, svint8_t s8, svfloat16_t f16, svfloat32_t f32,
+ svfloat64_t f64, svint32_t s32, int i)
+{
+ svdup_laneq (f32); /* { dg-error {too few arguments to function 'svdup_laneq'} } */
+ svdup_laneq (f32, 0, 0); /* { dg-error {too many arguments to function 'svdup_laneq'} } */
+ svdup_laneq (pg, 0); /* { dg-error {'svdup_laneq' has no form that takes 'svbool_t' arguments} } */
+ svdup_laneq (1, 0); /* { dg-error {passing 'int' to argument 1 of 'svdup_laneq', which expects an SVE type rather than a scalar} } */
+ svdup_laneq (f32, s32); /* { dg-error {argument 2 of 'svdup_laneq' must be an integer constant expression} } */
+ svdup_laneq (f32, i); /* { dg-error {argument 2 of 'svdup_laneq' must be an integer constant expression} } */
+
+ svdup_laneq (s8, 0);
+ svdup_laneq (s8, 15);
+ svdup_laneq (s8, 16); /* { dg-error {passing 16 to argument 2 of 'svdup_laneq', which expects a value in the range \[0, 15\]} } */
+ svdup_laneq (s8, -1); /* { dg-error {passing -1 to argument 2 of 'svdup_laneq', which expects a value in the range \[0, 15\]} } */
+
+ svdup_laneq (f16, 0);
+ svdup_laneq (f16, 7);
+ svdup_laneq (f16, 8); /* { dg-error {passing 8 to argument 2 of 'svdup_laneq', which expects a value in the range \[0, 7\]} } */
+ svdup_laneq (f16, -1); /* { dg-error {passing -1 to argument 2 of 'svdup_laneq', which expects a value in the range \[0, 7\]} } */
+
+ svdup_laneq (f32, 0);
+ svdup_laneq (f32, 3);
+ svdup_laneq (f32, 4); /* { dg-error {passing 4 to argument 2 of 'svdup_laneq', which expects a value in the range \[0, 3\]} } */
+ svdup_laneq (f32, -1); /* { dg-error {passing -1 to argument 2 of 'svdup_laneq', which expects a value in the range \[0, 3\]} } */
+
+ svdup_laneq (s32, 0);
+ svdup_laneq (s32, 3);
+ svdup_laneq (s32, 4); /* { dg-error {passing 4 to argument 2 of 'svdup_laneq', which expects a value in the range \[0, 3\]} } */
+ svdup_laneq (s32, -1); /* { dg-error {passing -1 to argument 2 of 'svdup_laneq', which expects a value in the range \[0, 3\]} } */
+
+ svdup_laneq (f64, 0);
+ svdup_laneq (f64, 1);
+ svdup_laneq (f64, 2); /* { dg-error {passing 2 to argument 2 of 'svdup_laneq', which expects a value in the range \[0, 1\]} } */
+ svdup_laneq (f64, -1); /* { dg-error {passing -1 to argument 2 of 'svdup_laneq', which expects a value in the range \[0, 1\]} } */
+}
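
The accepted calls above reduce to uses like the following sketch (illustrative name):

/* svdup_laneq broadcasts the selected lane within each 128-bit quadword,
   so the immediate range depends on the element width (here [0, 3] for
   32-bit elements).  */
#include <arm_sve.h>

#pragma GCC target "+sve2p1"

svfloat32_t
broadcast_lane2_per_quadword (svfloat32_t x)
{
  return svdup_laneq (x, 2);
}
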
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_f16.c
new file mode 100644
index 0000000..66716ce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_f16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** addqv_d0_f16_tied:
+** faddqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_f16_tied, float16x8_t, svfloat16_t,
+ d0 = svaddqv_f16 (p0, z0),
+ d0 = svaddqv (p0, z0))
+
+/*
+** addqv_d0_f16_untied:
+** faddqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_f16_untied, float16x8_t, svfloat16_t,
+ d0 = svaddqv_f16 (p0, z1),
+ d0 = svaddqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_f32.c
new file mode 100644
index 0000000..f6941f2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_f32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** addqv_d0_f32_tied:
+** faddqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_f32_tied, float32x4_t, svfloat32_t,
+ d0 = svaddqv_f32 (p0, z0),
+ d0 = svaddqv (p0, z0))
+
+/*
+** addqv_d0_f32_untied:
+** faddqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_f32_untied, float32x4_t, svfloat32_t,
+ d0 = svaddqv_f32 (p0, z1),
+ d0 = svaddqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_f64.c
new file mode 100644
index 0000000..69d062a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_f64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** addqv_d0_f64_tied:
+** faddqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_f64_tied, float64x2_t, svfloat64_t,
+ d0 = svaddqv_f64 (p0, z0),
+ d0 = svaddqv (p0, z0))
+
+/*
+** addqv_d0_f64_untied:
+** faddqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_f64_untied, float64x2_t, svfloat64_t,
+ d0 = svaddqv_f64 (p0, z1),
+ d0 = svaddqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s16.c
new file mode 100644
index 0000000..d4af096
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** addqv_d0_s16_tied:
+** addqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_s16_tied, int16x8_t, svint16_t,
+ d0 = svaddqv_s16 (p0, z0),
+ d0 = svaddqv (p0, z0))
+
+/*
+** addqv_d0_s16_untied:
+** addqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_s16_untied, int16x8_t, svint16_t,
+ d0 = svaddqv_s16 (p0, z1),
+ d0 = svaddqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s32.c
new file mode 100644
index 0000000..8d5b91b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** addqv_d0_s32_tied:
+** addqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_s32_tied, int32x4_t, svint32_t,
+ d0 = svaddqv_s32 (p0, z0),
+ d0 = svaddqv (p0, z0))
+
+/*
+** addqv_d0_s32_untied:
+** addqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_s32_untied, int32x4_t, svint32_t,
+ d0 = svaddqv_s32 (p0, z1),
+ d0 = svaddqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s64.c
new file mode 100644
index 0000000..0c18077
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** addqv_d0_s64_tied:
+** addqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_s64_tied, int64x2_t, svint64_t,
+ d0 = svaddqv_s64 (p0, z0),
+ d0 = svaddqv (p0, z0))
+
+/*
+** addqv_d0_s64_untied:
+** addqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_s64_untied, int64x2_t, svint64_t,
+ d0 = svaddqv_s64 (p0, z1),
+ d0 = svaddqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s8.c
new file mode 100644
index 0000000..5224473
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_s8.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** addqv_d0_s8_tied:
+** addqv v0\.16b, p0, z0\.b
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_s8_tied, int8x16_t, svint8_t,
+ d0 = svaddqv_s8 (p0, z0),
+ d0 = svaddqv (p0, z0))
+
+/*
+** addqv_d0_s8_untied:
+** addqv v0\.16b, p0, z1\.b
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_s8_untied, int8x16_t, svint8_t,
+ d0 = svaddqv_s8 (p0, z1),
+ d0 = svaddqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u16.c
new file mode 100644
index 0000000..e80f09c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** addqv_d0_u16_tied:
+** addqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_u16_tied, uint16x8_t, svuint16_t,
+ d0 = svaddqv_u16 (p0, z0),
+ d0 = svaddqv (p0, z0))
+
+/*
+** addqv_d0_u16_untied:
+** addqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_u16_untied, uint16x8_t, svuint16_t,
+ d0 = svaddqv_u16 (p0, z1),
+ d0 = svaddqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u32.c
new file mode 100644
index 0000000..fb0e4f8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** addqv_d0_u32_tied:
+** addqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_u32_tied, uint32x4_t, svuint32_t,
+ d0 = svaddqv_u32 (p0, z0),
+ d0 = svaddqv (p0, z0))
+
+/*
+** addqv_d0_u32_untied:
+** addqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_u32_untied, uint32x4_t, svuint32_t,
+ d0 = svaddqv_u32 (p0, z1),
+ d0 = svaddqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u64.c
new file mode 100644
index 0000000..9f91c6f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** addqv_d0_u64_tied:
+** addqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_u64_tied, uint64x2_t, svuint64_t,
+ d0 = svaddqv_u64 (p0, z0),
+ d0 = svaddqv (p0, z0))
+
+/*
+** addqv_d0_u64_untied:
+** addqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_u64_untied, uint64x2_t, svuint64_t,
+ d0 = svaddqv_u64 (p0, z1),
+ d0 = svaddqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u8.c
new file mode 100644
index 0000000..5220d03
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/addqv_u8.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** addqv_d0_u8_tied:
+** addqv v0\.16b, p0, z0\.b
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_u8_tied, uint8x16_t, svuint8_t,
+ d0 = svaddqv_u8 (p0, z0),
+ d0 = svaddqv (p0, z0))
+
+/*
+** addqv_d0_u8_untied:
+** addqv v0\.16b, p0, z1\.b
+** ret
+*/
+TEST_REDUCTION_D (addqv_d0_u8_untied, uint8x16_t, svuint8_t,
+ d0 = svaddqv_u8 (p0, z1),
+ d0 = svaddqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s16.c
new file mode 100644
index 0000000..bfcad48
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** andqv_d0_s16_tied:
+** andqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_s16_tied, int16x8_t, svint16_t,
+ d0 = svandqv_s16 (p0, z0),
+ d0 = svandqv (p0, z0))
+
+/*
+** andqv_d0_s16_untied:
+** andqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_s16_untied, int16x8_t, svint16_t,
+ d0 = svandqv_s16 (p0, z1),
+ d0 = svandqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s32.c
new file mode 100644
index 0000000..82b6b69
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** andqv_d0_s32_tied:
+** andqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_s32_tied, int32x4_t, svint32_t,
+ d0 = svandqv_s32 (p0, z0),
+ d0 = svandqv (p0, z0))
+
+/*
+** andqv_d0_s32_untied:
+** andqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_s32_untied, int32x4_t, svint32_t,
+ d0 = svandqv_s32 (p0, z1),
+ d0 = svandqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s64.c
new file mode 100644
index 0000000..fce8a47
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** andqv_d0_s64_tied:
+** andqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_s64_tied, int64x2_t, svint64_t,
+ d0 = svandqv_s64 (p0, z0),
+ d0 = svandqv (p0, z0))
+
+/*
+** andqv_d0_s64_untied:
+** andqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_s64_untied, int64x2_t, svint64_t,
+ d0 = svandqv_s64 (p0, z1),
+ d0 = svandqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s8.c
new file mode 100644
index 0000000..f21b2a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_s8.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** andqv_d0_s8_tied:
+** andqv v0\.16b, p0, z0\.b
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_s8_tied, int8x16_t, svint8_t,
+ d0 = svandqv_s8 (p0, z0),
+ d0 = svandqv (p0, z0))
+
+/*
+** andqv_d0_s8_untied:
+** andqv v0\.16b, p0, z1\.b
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_s8_untied, int8x16_t, svint8_t,
+ d0 = svandqv_s8 (p0, z1),
+ d0 = svandqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u16.c
new file mode 100644
index 0000000..7044e88
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** andqv_d0_u16_tied:
+** andqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_u16_tied, uint16x8_t, svuint16_t,
+ d0 = svandqv_u16 (p0, z0),
+ d0 = svandqv (p0, z0))
+
+/*
+** andqv_d0_u16_untied:
+** andqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_u16_untied, uint16x8_t, svuint16_t,
+ d0 = svandqv_u16 (p0, z1),
+ d0 = svandqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u32.c
new file mode 100644
index 0000000..408aa2b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** andqv_d0_u32_tied:
+** andqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_u32_tied, uint32x4_t, svuint32_t,
+ d0 = svandqv_u32 (p0, z0),
+ d0 = svandqv (p0, z0))
+
+/*
+** andqv_d0_u32_untied:
+** andqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_u32_untied, uint32x4_t, svuint32_t,
+ d0 = svandqv_u32 (p0, z1),
+ d0 = svandqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u64.c
new file mode 100644
index 0000000..10ba468
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** andqv_d0_u64_tied:
+** andqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_u64_tied, uint64x2_t, svuint64_t,
+ d0 = svandqv_u64 (p0, z0),
+ d0 = svandqv (p0, z0))
+
+/*
+** andqv_d0_u64_untied:
+** andqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_u64_untied, uint64x2_t, svuint64_t,
+ d0 = svandqv_u64 (p0, z1),
+ d0 = svandqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u8.c
new file mode 100644
index 0000000..4694b58
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/andqv_u8.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** andqv_d0_u8_tied:
+** andqv v0\.16b, p0, z0\.b
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_u8_tied, uint8x16_t, svuint8_t,
+ d0 = svandqv_u8 (p0, z0),
+ d0 = svandqv (p0, z0))
+
+/*
+** andqv_d0_u8_untied:
+** andqv v0\.16b, p0, z1\.b
+** ret
+*/
+TEST_REDUCTION_D (andqv_d0_u8_untied, uint8x16_t, svuint8_t,
+ d0 = svandqv_u8 (p0, z1),
+ d0 = svandqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_bf16.c
new file mode 100644
index 0000000..bb76b81
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_bf16.c
@@ -0,0 +1,53 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** dup_laneq_0_bf16_tied1:
+** dupq z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_bf16_tied1, svbfloat16_t,
+ z0 = svdup_laneq_bf16 (z0, 0),
+ z0 = svdup_laneq (z0, 0))
+
+/*
+** dup_laneq_0_bf16_untied:
+** dupq z0\.h, z1\.h\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_bf16_untied, svbfloat16_t,
+ z0 = svdup_laneq_bf16 (z1, 0),
+ z0 = svdup_laneq (z1, 0))
+
+/*
+** dup_laneq_3_bf16:
+** dupq z0\.h, z0\.h\[3\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_3_bf16, svbfloat16_t,
+ z0 = svdup_laneq_bf16 (z0, 3),
+ z0 = svdup_laneq (z0, 3))
+
+/*
+** dup_laneq_4_bf16:
+** dupq z0\.h, z0\.h\[4\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_4_bf16, svbfloat16_t,
+ z0 = svdup_laneq_bf16 (z0, 4),
+ z0 = svdup_laneq (z0, 4))
+
+/*
+** dup_laneq_7_bf16:
+** dupq z0\.h, z0\.h\[7\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_7_bf16, svbfloat16_t,
+ z0 = svdup_laneq_bf16 (z0, 7),
+ z0 = svdup_laneq (z0, 7))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_f16.c
new file mode 100644
index 0000000..5f8bddb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_f16.c
@@ -0,0 +1,53 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** dup_laneq_0_f16_tied1:
+** dupq z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_f16_tied1, svfloat16_t,
+ z0 = svdup_laneq_f16 (z0, 0),
+ z0 = svdup_laneq (z0, 0))
+
+/*
+** dup_laneq_0_f16_untied:
+** dupq z0\.h, z1\.h\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_f16_untied, svfloat16_t,
+ z0 = svdup_laneq_f16 (z1, 0),
+ z0 = svdup_laneq (z1, 0))
+
+/*
+** dup_laneq_3_f16:
+** dupq z0\.h, z0\.h\[3\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_3_f16, svfloat16_t,
+ z0 = svdup_laneq_f16 (z0, 3),
+ z0 = svdup_laneq (z0, 3))
+
+/*
+** dup_laneq_4_f16:
+** dupq z0\.h, z0\.h\[4\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_4_f16, svfloat16_t,
+ z0 = svdup_laneq_f16 (z0, 4),
+ z0 = svdup_laneq (z0, 4))
+
+/*
+** dup_laneq_7_f16:
+** dupq z0\.h, z0\.h\[7\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_7_f16, svfloat16_t,
+ z0 = svdup_laneq_f16 (z0, 7),
+ z0 = svdup_laneq (z0, 7))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_f32.c
new file mode 100644
index 0000000..1f9341a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_f32.c
@@ -0,0 +1,53 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** dup_laneq_0_f32_tied1:
+** dupq z0\.s, z0\.s\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_f32_tied1, svfloat32_t,
+ z0 = svdup_laneq_f32 (z0, 0),
+ z0 = svdup_laneq (z0, 0))
+
+/*
+** dup_laneq_0_f32_untied:
+** dupq z0\.s, z1\.s\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_f32_untied, svfloat32_t,
+ z0 = svdup_laneq_f32 (z1, 0),
+ z0 = svdup_laneq (z1, 0))
+
+/*
+** dup_laneq_1_f32:
+** dupq z0\.s, z0\.s\[1\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_1_f32, svfloat32_t,
+ z0 = svdup_laneq_f32 (z0, 1),
+ z0 = svdup_laneq (z0, 1))
+
+/*
+** dup_laneq_2_f32:
+** dupq z0\.s, z0\.s\[2\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_2_f32, svfloat32_t,
+ z0 = svdup_laneq_f32 (z0, 2),
+ z0 = svdup_laneq (z0, 2))
+
+/*
+** dup_laneq_3_f32:
+** dupq z0\.s, z0\.s\[3\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_3_f32, svfloat32_t,
+ z0 = svdup_laneq_f32 (z0, 3),
+ z0 = svdup_laneq (z0, 3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_f64.c
new file mode 100644
index 0000000..41b9d73
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_f64.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** dup_laneq_0_f64_tied1:
+** dupq z0\.d, z0\.d\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_f64_tied1, svfloat64_t,
+ z0 = svdup_laneq_f64 (z0, 0),
+ z0 = svdup_laneq (z0, 0))
+
+/*
+** dup_laneq_0_f64_untied:
+** dupq z0\.d, z1\.d\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_f64_untied, svfloat64_t,
+ z0 = svdup_laneq_f64 (z1, 0),
+ z0 = svdup_laneq (z1, 0))
+
+/*
+** dup_laneq_1_f64:
+** dupq z0\.d, z0\.d\[1\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_1_f64, svfloat64_t,
+ z0 = svdup_laneq_f64 (z0, 1),
+ z0 = svdup_laneq (z0, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s16.c
new file mode 100644
index 0000000..9cef3c0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s16.c
@@ -0,0 +1,53 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** dup_laneq_0_s16_tied1:
+** dupq z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_s16_tied1, svint16_t,
+ z0 = svdup_laneq_s16 (z0, 0),
+ z0 = svdup_laneq (z0, 0))
+
+/*
+** dup_laneq_0_s16_untied:
+** dupq z0\.h, z1\.h\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_s16_untied, svint16_t,
+ z0 = svdup_laneq_s16 (z1, 0),
+ z0 = svdup_laneq (z1, 0))
+
+/*
+** dup_laneq_3_s16:
+** dupq z0\.h, z0\.h\[3\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_3_s16, svint16_t,
+ z0 = svdup_laneq_s16 (z0, 3),
+ z0 = svdup_laneq (z0, 3))
+
+/*
+** dup_laneq_4_s16:
+** dupq z0\.h, z0\.h\[4\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_4_s16, svint16_t,
+ z0 = svdup_laneq_s16 (z0, 4),
+ z0 = svdup_laneq (z0, 4))
+
+/*
+** dup_laneq_7_s16:
+** dupq z0\.h, z0\.h\[7\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_7_s16, svint16_t,
+ z0 = svdup_laneq_s16 (z0, 7),
+ z0 = svdup_laneq (z0, 7))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s32.c
new file mode 100644
index 0000000..70afc38
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s32.c
@@ -0,0 +1,53 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** dup_laneq_0_s32_tied1:
+** dupq z0\.s, z0\.s\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_s32_tied1, svint32_t,
+ z0 = svdup_laneq_s32 (z0, 0),
+ z0 = svdup_laneq (z0, 0))
+
+/*
+** dup_laneq_0_s32_untied:
+** dupq z0\.s, z1\.s\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_s32_untied, svint32_t,
+ z0 = svdup_laneq_s32 (z1, 0),
+ z0 = svdup_laneq (z1, 0))
+
+/*
+** dup_laneq_1_s32:
+** dupq z0\.s, z0\.s\[1\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_1_s32, svint32_t,
+ z0 = svdup_laneq_s32 (z0, 1),
+ z0 = svdup_laneq (z0, 1))
+
+/*
+** dup_laneq_2_s32:
+** dupq z0\.s, z0\.s\[2\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_2_s32, svint32_t,
+ z0 = svdup_laneq_s32 (z0, 2),
+ z0 = svdup_laneq (z0, 2))
+
+/*
+** dup_laneq_3_s32:
+** dupq z0\.s, z0\.s\[3\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_3_s32, svint32_t,
+ z0 = svdup_laneq_s32 (z0, 3),
+ z0 = svdup_laneq (z0, 3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s64.c
new file mode 100644
index 0000000..b3c3d86
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s64.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** dup_laneq_0_s64_tied1:
+** dupq z0\.d, z0\.d\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_s64_tied1, svint64_t,
+ z0 = svdup_laneq_s64 (z0, 0),
+ z0 = svdup_laneq (z0, 0))
+
+/*
+** dup_laneq_0_s64_untied:
+** dupq z0\.d, z1\.d\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_s64_untied, svint64_t,
+ z0 = svdup_laneq_s64 (z1, 0),
+ z0 = svdup_laneq (z1, 0))
+
+/*
+** dup_laneq_1_s64:
+** dupq z0\.d, z0\.d\[1\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_1_s64, svint64_t,
+ z0 = svdup_laneq_s64 (z0, 1),
+ z0 = svdup_laneq (z0, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s8.c
new file mode 100644
index 0000000..49400da
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_s8.c
@@ -0,0 +1,53 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** dup_laneq_0_s8_tied1:
+** dupq z0\.b, z0\.b\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_s8_tied1, svint8_t,
+ z0 = svdup_laneq_s8 (z0, 0),
+ z0 = svdup_laneq (z0, 0))
+
+/*
+** dup_laneq_0_s8_untied:
+** dupq z0\.b, z1\.b\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_s8_untied, svint8_t,
+ z0 = svdup_laneq_s8 (z1, 0),
+ z0 = svdup_laneq (z1, 0))
+
+/*
+** dup_laneq_7_s8:
+** dupq z0\.b, z0\.b\[7\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_7_s8, svint8_t,
+ z0 = svdup_laneq_s8 (z0, 7),
+ z0 = svdup_laneq (z0, 7))
+
+/*
+** dup_laneq_8_s8:
+** dupq z0\.b, z0\.b\[8\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_8_s8, svint8_t,
+ z0 = svdup_laneq_s8 (z0, 8),
+ z0 = svdup_laneq (z0, 8))
+
+/*
+** dup_laneq_15_s8:
+** dupq z0\.b, z0\.b\[15\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_15_s8, svint8_t,
+ z0 = svdup_laneq_s8 (z0, 15),
+ z0 = svdup_laneq (z0, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u16.c
new file mode 100644
index 0000000..90a5100
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u16.c
@@ -0,0 +1,53 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** dup_laneq_0_u16_tied1:
+** dupq z0\.h, z0\.h\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_u16_tied1, svuint16_t,
+ z0 = svdup_laneq_u16 (z0, 0),
+ z0 = svdup_laneq (z0, 0))
+
+/*
+** dup_laneq_0_u16_untied:
+** dupq z0\.h, z1\.h\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_u16_untied, svuint16_t,
+ z0 = svdup_laneq_u16 (z1, 0),
+ z0 = svdup_laneq (z1, 0))
+
+/*
+** dup_laneq_3_u16:
+** dupq z0\.h, z0\.h\[3\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_3_u16, svuint16_t,
+ z0 = svdup_laneq_u16 (z0, 3),
+ z0 = svdup_laneq (z0, 3))
+
+/*
+** dup_laneq_4_u16:
+** dupq z0\.h, z0\.h\[4\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_4_u16, svuint16_t,
+ z0 = svdup_laneq_u16 (z0, 4),
+ z0 = svdup_laneq (z0, 4))
+
+/*
+** dup_laneq_7_u16:
+** dupq z0\.h, z0\.h\[7\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_7_u16, svuint16_t,
+ z0 = svdup_laneq_u16 (z0, 7),
+ z0 = svdup_laneq (z0, 7))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u32.c
new file mode 100644
index 0000000..5792dd1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u32.c
@@ -0,0 +1,53 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** dup_laneq_0_u32_tied1:
+** dupq z0\.s, z0\.s\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_u32_tied1, svuint32_t,
+ z0 = svdup_laneq_u32 (z0, 0),
+ z0 = svdup_laneq (z0, 0))
+
+/*
+** dup_laneq_0_u32_untied:
+** dupq z0\.s, z1\.s\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_u32_untied, svuint32_t,
+ z0 = svdup_laneq_u32 (z1, 0),
+ z0 = svdup_laneq (z1, 0))
+
+/*
+** dup_laneq_1_u32:
+** dupq z0\.s, z0\.s\[1\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_1_u32, svuint32_t,
+ z0 = svdup_laneq_u32 (z0, 1),
+ z0 = svdup_laneq (z0, 1))
+
+/*
+** dup_laneq_2_u32:
+** dupq z0\.s, z0\.s\[2\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_2_u32, svuint32_t,
+ z0 = svdup_laneq_u32 (z0, 2),
+ z0 = svdup_laneq (z0, 2))
+
+/*
+** dup_laneq_3_u32:
+** dupq z0\.s, z0\.s\[3\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_3_u32, svuint32_t,
+ z0 = svdup_laneq_u32 (z0, 3),
+ z0 = svdup_laneq (z0, 3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u64.c
new file mode 100644
index 0000000..7d5e81e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u64.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** dup_laneq_0_u64_tied1:
+** dupq z0\.d, z0\.d\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_u64_tied1, svuint64_t,
+ z0 = svdup_laneq_u64 (z0, 0),
+ z0 = svdup_laneq (z0, 0))
+
+/*
+** dup_laneq_0_u64_untied:
+** dupq z0\.d, z1\.d\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_u64_untied, svuint64_t,
+ z0 = svdup_laneq_u64 (z1, 0),
+ z0 = svdup_laneq (z1, 0))
+
+/*
+** dup_laneq_1_u64:
+** dupq z0\.d, z0\.d\[1\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_1_u64, svuint64_t,
+ z0 = svdup_laneq_u64 (z0, 1),
+ z0 = svdup_laneq (z0, 1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u8.c
new file mode 100644
index 0000000..64d7144b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/dup_laneq_u8.c
@@ -0,0 +1,53 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** dup_laneq_0_u8_tied1:
+** dupq z0\.b, z0\.b\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_u8_tied1, svuint8_t,
+ z0 = svdup_laneq_u8 (z0, 0),
+ z0 = svdup_laneq (z0, 0))
+
+/*
+** dup_laneq_0_u8_untied:
+** dupq z0\.b, z1\.b\[0\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_0_u8_untied, svuint8_t,
+ z0 = svdup_laneq_u8 (z1, 0),
+ z0 = svdup_laneq (z1, 0))
+
+/*
+** dup_laneq_7_u8:
+** dupq z0\.b, z0\.b\[7\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_7_u8, svuint8_t,
+ z0 = svdup_laneq_u8 (z0, 7),
+ z0 = svdup_laneq (z0, 7))
+
+/*
+** dup_laneq_8_u8:
+** dupq z0\.b, z0\.b\[8\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_8_u8, svuint8_t,
+ z0 = svdup_laneq_u8 (z0, 8),
+ z0 = svdup_laneq (z0, 8))
+
+/*
+** dup_laneq_15_u8:
+** dupq z0\.b, z0\.b\[15\]
+** ret
+*/
+TEST_UNIFORM_Z (dup_laneq_15_u8, svuint8_t,
+ z0 = svdup_laneq_u8 (z0, 15),
+ z0 = svdup_laneq (z0, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s16.c
new file mode 100644
index 0000000..d2c6a2a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** eorqv_d0_s16_tied:
+** eorqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_s16_tied, int16x8_t, svint16_t,
+ d0 = sveorqv_s16 (p0, z0),
+ d0 = sveorqv (p0, z0))
+
+/*
+** eorqv_d0_s16_untied:
+** eorqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_s16_untied, int16x8_t, svint16_t,
+ d0 = sveorqv_s16 (p0, z1),
+ d0 = sveorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s32.c
new file mode 100644
index 0000000..51c9c6e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** eorqv_d0_s32_tied:
+** eorqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_s32_tied, int32x4_t, svint32_t,
+ d0 = sveorqv_s32 (p0, z0),
+ d0 = sveorqv (p0, z0))
+
+/*
+** eorqv_d0_s32_untied:
+** eorqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_s32_untied, int32x4_t, svint32_t,
+ d0 = sveorqv_s32 (p0, z1),
+ d0 = sveorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s64.c
new file mode 100644
index 0000000..1e040fa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** eorqv_d0_s64_tied:
+** eorqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_s64_tied, int64x2_t, svint64_t,
+ d0 = sveorqv_s64 (p0, z0),
+ d0 = sveorqv (p0, z0))
+
+/*
+** eorqv_d0_s64_untied:
+** eorqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_s64_untied, int64x2_t, svint64_t,
+ d0 = sveorqv_s64 (p0, z1),
+ d0 = sveorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s8.c
new file mode 100644
index 0000000..5f129669
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_s8.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** eorqv_d0_s8_tied:
+** eorqv v0\.16b, p0, z0\.b
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_s8_tied, int8x16_t, svint8_t,
+ d0 = sveorqv_s8 (p0, z0),
+ d0 = sveorqv (p0, z0))
+
+/*
+** eorqv_d0_s8_untied:
+** eorqv v0\.16b, p0, z1\.b
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_s8_untied, int8x16_t, svint8_t,
+ d0 = sveorqv_s8 (p0, z1),
+ d0 = sveorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u16.c
new file mode 100644
index 0000000..ee631a2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** eorqv_d0_u16_tied:
+** eorqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_u16_tied, uint16x8_t, svuint16_t,
+ d0 = sveorqv_u16 (p0, z0),
+ d0 = sveorqv (p0, z0))
+
+/*
+** eorqv_d0_u16_untied:
+** eorqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_u16_untied, uint16x8_t, svuint16_t,
+ d0 = sveorqv_u16 (p0, z1),
+ d0 = sveorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u32.c
new file mode 100644
index 0000000..25dfac8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** eorqv_d0_u32_tied:
+** eorqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_u32_tied, uint32x4_t, svuint32_t,
+ d0 = sveorqv_u32 (p0, z0),
+ d0 = sveorqv (p0, z0))
+
+/*
+** eorqv_d0_u32_untied:
+** eorqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_u32_untied, uint32x4_t, svuint32_t,
+ d0 = sveorqv_u32 (p0, z1),
+ d0 = sveorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u64.c
new file mode 100644
index 0000000..3c5cfbe
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** eorqv_d0_u64_tied:
+** eorqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_u64_tied, uint64x2_t, svuint64_t,
+ d0 = sveorqv_u64 (p0, z0),
+ d0 = sveorqv (p0, z0))
+
+/*
+** eorqv_d0_u64_untied:
+** eorqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_u64_untied, uint64x2_t, svuint64_t,
+ d0 = sveorqv_u64 (p0, z1),
+ d0 = sveorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u8.c
new file mode 100644
index 0000000..300ae99
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/eorqv_u8.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** eorqv_d0_u8_tied:
+** eorqv v0\.16b, p0, z0\.b
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_u8_tied, uint8x16_t, svuint8_t,
+ d0 = sveorqv_u8 (p0, z0),
+ d0 = sveorqv (p0, z0))
+
+/*
+** eorqv_d0_u8_untied:
+** eorqv v0\.16b, p0, z1\.b
+** ret
+*/
+TEST_REDUCTION_D (eorqv_d0_u8_untied, uint8x16_t, svuint8_t,
+ d0 = sveorqv_u8 (p0, z1),
+ d0 = sveorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_bf16.c
new file mode 100644
index 0000000..9718944
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_bf16.c
@@ -0,0 +1,77 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** extq_1_bf16_tied1:
+** extq z0\.b, z0\.b, z1\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_bf16_tied1, svbfloat16_t,
+ z0 = svextq_bf16 (z0, z1, 1),
+ z0 = svextq (z0, z1, 1))
+
+/*
+** extq_1_bf16_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** extq z0\.b, z0\.b, \1\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_bf16_tied2, svbfloat16_t,
+ z0 = svextq_bf16 (z1, z0, 1),
+ z0 = svextq (z1, z0, 1))
+
+/*
+** extq_1_bf16_untied:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_bf16_untied, svbfloat16_t,
+ z0 = svextq_bf16 (z1, z2, 1),
+ z0 = svextq (z1, z2, 1))
+
+/*
+** extq_0_bf16:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (extq_0_bf16, svbfloat16_t,
+ z0 = svextq_bf16 (z1, z2, 0),
+ z0 = svextq (z1, z2, 0))
+
+/*
+** extq_2_bf16:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #4
+** ret
+*/
+TEST_UNIFORM_Z (extq_2_bf16, svbfloat16_t,
+ z0 = svextq_bf16 (z1, z2, 2),
+ z0 = svextq (z1, z2, 2))
+
+/*
+** extq_3_bf16:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #6
+** ret
+*/
+TEST_UNIFORM_Z (extq_3_bf16, svbfloat16_t,
+ z0 = svextq_bf16 (z1, z2, 3),
+ z0 = svextq (z1, z2, 3))
+
+/*
+** extq_7_bf16:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #14
+** ret
+*/
+TEST_UNIFORM_Z (extq_7_bf16, svbfloat16_t,
+ z0 = svextq_bf16 (z1, z2, 7),
+ z0 = svextq (z1, z2, 7))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_f16.c
new file mode 100644
index 0000000..3d099bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_f16.c
@@ -0,0 +1,77 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** extq_1_f16_tied1:
+** extq z0\.b, z0\.b, z1\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_f16_tied1, svfloat16_t,
+ z0 = svextq_f16 (z0, z1, 1),
+ z0 = svextq (z0, z1, 1))
+
+/*
+** extq_1_f16_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** extq z0\.b, z0\.b, \1\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_f16_tied2, svfloat16_t,
+ z0 = svextq_f16 (z1, z0, 1),
+ z0 = svextq (z1, z0, 1))
+
+/*
+** extq_1_f16_untied:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_f16_untied, svfloat16_t,
+ z0 = svextq_f16 (z1, z2, 1),
+ z0 = svextq (z1, z2, 1))
+
+/*
+** extq_0_f16:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (extq_0_f16, svfloat16_t,
+ z0 = svextq_f16 (z1, z2, 0),
+ z0 = svextq (z1, z2, 0))
+
+/*
+** extq_2_f16:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #4
+** ret
+*/
+TEST_UNIFORM_Z (extq_2_f16, svfloat16_t,
+ z0 = svextq_f16 (z1, z2, 2),
+ z0 = svextq (z1, z2, 2))
+
+/*
+** extq_3_f16:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #6
+** ret
+*/
+TEST_UNIFORM_Z (extq_3_f16, svfloat16_t,
+ z0 = svextq_f16 (z1, z2, 3),
+ z0 = svextq (z1, z2, 3))
+
+/*
+** extq_7_f16:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #14
+** ret
+*/
+TEST_UNIFORM_Z (extq_7_f16, svfloat16_t,
+ z0 = svextq_f16 (z1, z2, 7),
+ z0 = svextq (z1, z2, 7))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_f32.c
new file mode 100644
index 0000000..ba456c5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_f32.c
@@ -0,0 +1,67 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** extq_1_f32_tied1:
+** extq z0\.b, z0\.b, z1\.b, #4
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_f32_tied1, svfloat32_t,
+ z0 = svextq_f32 (z0, z1, 1),
+ z0 = svextq (z0, z1, 1))
+
+/*
+** extq_1_f32_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** extq z0\.b, z0\.b, \1\.b, #4
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_f32_tied2, svfloat32_t,
+ z0 = svextq_f32 (z1, z0, 1),
+ z0 = svextq (z1, z0, 1))
+
+/*
+** extq_1_f32_untied:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #4
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_f32_untied, svfloat32_t,
+ z0 = svextq_f32 (z1, z2, 1),
+ z0 = svextq (z1, z2, 1))
+
+/*
+** extq_0_f32:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (extq_0_f32, svfloat32_t,
+ z0 = svextq_f32 (z1, z2, 0),
+ z0 = svextq (z1, z2, 0))
+
+/*
+** extq_2_f32:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #8
+** ret
+*/
+TEST_UNIFORM_Z (extq_2_f32, svfloat32_t,
+ z0 = svextq_f32 (z1, z2, 2),
+ z0 = svextq (z1, z2, 2))
+
+/*
+** extq_3_f32:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #12
+** ret
+*/
+TEST_UNIFORM_Z (extq_3_f32, svfloat32_t,
+ z0 = svextq_f32 (z1, z2, 3),
+ z0 = svextq (z1, z2, 3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_f64.c
new file mode 100644
index 0000000..76501de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_f64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** extq_1_f64_tied1:
+** extq z0\.b, z0\.b, z1\.b, #8
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_f64_tied1, svfloat64_t,
+ z0 = svextq_f64 (z0, z1, 1),
+ z0 = svextq (z0, z1, 1))
+
+/*
+** extq_1_f64_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** extq z0\.b, z0\.b, \1\.b, #8
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_f64_tied2, svfloat64_t,
+ z0 = svextq_f64 (z1, z0, 1),
+ z0 = svextq (z1, z0, 1))
+
+/*
+** extq_1_f64_untied:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #8
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_f64_untied, svfloat64_t,
+ z0 = svextq_f64 (z1, z2, 1),
+ z0 = svextq (z1, z2, 1))
+
+/*
+** extq_0_f64:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (extq_0_f64, svfloat64_t,
+ z0 = svextq_f64 (z1, z2, 0),
+ z0 = svextq (z1, z2, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s16.c
new file mode 100644
index 0000000..ff6a346
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s16.c
@@ -0,0 +1,77 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** extq_1_s16_tied1:
+** extq z0\.b, z0\.b, z1\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_s16_tied1, svint16_t,
+ z0 = svextq_s16 (z0, z1, 1),
+ z0 = svextq (z0, z1, 1))
+
+/*
+** extq_1_s16_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** extq z0\.b, z0\.b, \1\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_s16_tied2, svint16_t,
+ z0 = svextq_s16 (z1, z0, 1),
+ z0 = svextq (z1, z0, 1))
+
+/*
+** extq_1_s16_untied:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_s16_untied, svint16_t,
+ z0 = svextq_s16 (z1, z2, 1),
+ z0 = svextq (z1, z2, 1))
+
+/*
+** extq_0_s16:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (extq_0_s16, svint16_t,
+ z0 = svextq_s16 (z1, z2, 0),
+ z0 = svextq (z1, z2, 0))
+
+/*
+** extq_2_s16:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #4
+** ret
+*/
+TEST_UNIFORM_Z (extq_2_s16, svint16_t,
+ z0 = svextq_s16 (z1, z2, 2),
+ z0 = svextq (z1, z2, 2))
+
+/*
+** extq_3_s16:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #6
+** ret
+*/
+TEST_UNIFORM_Z (extq_3_s16, svint16_t,
+ z0 = svextq_s16 (z1, z2, 3),
+ z0 = svextq (z1, z2, 3))
+
+/*
+** extq_7_s16:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #14
+** ret
+*/
+TEST_UNIFORM_Z (extq_7_s16, svint16_t,
+ z0 = svextq_s16 (z1, z2, 7),
+ z0 = svextq (z1, z2, 7))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s32.c
new file mode 100644
index 0000000..faafb49
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s32.c
@@ -0,0 +1,67 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** extq_1_s32_tied1:
+** extq z0\.b, z0\.b, z1\.b, #4
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_s32_tied1, svint32_t,
+ z0 = svextq_s32 (z0, z1, 1),
+ z0 = svextq (z0, z1, 1))
+
+/*
+** extq_1_s32_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** extq z0\.b, z0\.b, \1\.b, #4
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_s32_tied2, svint32_t,
+ z0 = svextq_s32 (z1, z0, 1),
+ z0 = svextq (z1, z0, 1))
+
+/*
+** extq_1_s32_untied:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #4
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_s32_untied, svint32_t,
+ z0 = svextq_s32 (z1, z2, 1),
+ z0 = svextq (z1, z2, 1))
+
+/*
+** extq_0_s32:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (extq_0_s32, svint32_t,
+ z0 = svextq_s32 (z1, z2, 0),
+ z0 = svextq (z1, z2, 0))
+
+/*
+** extq_2_s32:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #8
+** ret
+*/
+TEST_UNIFORM_Z (extq_2_s32, svint32_t,
+ z0 = svextq_s32 (z1, z2, 2),
+ z0 = svextq (z1, z2, 2))
+
+/*
+** extq_3_s32:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #12
+** ret
+*/
+TEST_UNIFORM_Z (extq_3_s32, svint32_t,
+ z0 = svextq_s32 (z1, z2, 3),
+ z0 = svextq (z1, z2, 3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s64.c
new file mode 100644
index 0000000..4ad39f2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** extq_1_s64_tied1:
+** extq z0\.b, z0\.b, z1\.b, #8
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_s64_tied1, svint64_t,
+ z0 = svextq_s64 (z0, z1, 1),
+ z0 = svextq (z0, z1, 1))
+
+/*
+** extq_1_s64_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** extq z0\.b, z0\.b, \1\.b, #8
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_s64_tied2, svint64_t,
+ z0 = svextq_s64 (z1, z0, 1),
+ z0 = svextq (z1, z0, 1))
+
+/*
+** extq_1_s64_untied:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #8
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_s64_untied, svint64_t,
+ z0 = svextq_s64 (z1, z2, 1),
+ z0 = svextq (z1, z2, 1))
+
+/*
+** extq_0_s64:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (extq_0_s64, svint64_t,
+ z0 = svextq_s64 (z1, z2, 0),
+ z0 = svextq (z1, z2, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s8.c
new file mode 100644
index 0000000..325b90d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_s8.c
@@ -0,0 +1,77 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** extq_1_s8_tied1:
+** extq z0\.b, z0\.b, z1\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_s8_tied1, svint8_t,
+ z0 = svextq_s8 (z0, z1, 1),
+ z0 = svextq (z0, z1, 1))
+
+/*
+** extq_1_s8_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** extq z0\.b, z0\.b, \1\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_s8_tied2, svint8_t,
+ z0 = svextq_s8 (z1, z0, 1),
+ z0 = svextq (z1, z0, 1))
+
+/*
+** extq_1_s8_untied:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_s8_untied, svint8_t,
+ z0 = svextq_s8 (z1, z2, 1),
+ z0 = svextq (z1, z2, 1))
+
+/*
+** extq_0_s8:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (extq_0_s8, svint8_t,
+ z0 = svextq_s8 (z1, z2, 0),
+ z0 = svextq (z1, z2, 0))
+
+/*
+** extq_2_s8:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_2_s8, svint8_t,
+ z0 = svextq_s8 (z1, z2, 2),
+ z0 = svextq (z1, z2, 2))
+
+/*
+** extq_3_s8:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #3
+** ret
+*/
+TEST_UNIFORM_Z (extq_3_s8, svint8_t,
+ z0 = svextq_s8 (z1, z2, 3),
+ z0 = svextq (z1, z2, 3))
+
+/*
+** extq_15_s8:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #15
+** ret
+*/
+TEST_UNIFORM_Z (extq_15_s8, svint8_t,
+ z0 = svextq_s8 (z1, z2, 15),
+ z0 = svextq (z1, z2, 15))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u16.c
new file mode 100644
index 0000000..8b6c5964
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u16.c
@@ -0,0 +1,77 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** extq_1_u16_tied1:
+** extq z0\.b, z0\.b, z1\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_u16_tied1, svuint16_t,
+ z0 = svextq_u16 (z0, z1, 1),
+ z0 = svextq (z0, z1, 1))
+
+/*
+** extq_1_u16_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** extq z0\.b, z0\.b, \1\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_u16_tied2, svuint16_t,
+ z0 = svextq_u16 (z1, z0, 1),
+ z0 = svextq (z1, z0, 1))
+
+/*
+** extq_1_u16_untied:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_u16_untied, svuint16_t,
+ z0 = svextq_u16 (z1, z2, 1),
+ z0 = svextq (z1, z2, 1))
+
+/*
+** extq_0_u16:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (extq_0_u16, svuint16_t,
+ z0 = svextq_u16 (z1, z2, 0),
+ z0 = svextq (z1, z2, 0))
+
+/*
+** extq_2_u16:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #4
+** ret
+*/
+TEST_UNIFORM_Z (extq_2_u16, svuint16_t,
+ z0 = svextq_u16 (z1, z2, 2),
+ z0 = svextq (z1, z2, 2))
+
+/*
+** extq_3_u16:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #6
+** ret
+*/
+TEST_UNIFORM_Z (extq_3_u16, svuint16_t,
+ z0 = svextq_u16 (z1, z2, 3),
+ z0 = svextq (z1, z2, 3))
+
+/*
+** extq_7_u16:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #14
+** ret
+*/
+TEST_UNIFORM_Z (extq_7_u16, svuint16_t,
+ z0 = svextq_u16 (z1, z2, 7),
+ z0 = svextq (z1, z2, 7))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u32.c
new file mode 100644
index 0000000..aaefac1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u32.c
@@ -0,0 +1,67 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** extq_1_u32_tied1:
+** extq z0\.b, z0\.b, z1\.b, #4
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_u32_tied1, svuint32_t,
+ z0 = svextq_u32 (z0, z1, 1),
+ z0 = svextq (z0, z1, 1))
+
+/*
+** extq_1_u32_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** extq z0\.b, z0\.b, \1\.b, #4
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_u32_tied2, svuint32_t,
+ z0 = svextq_u32 (z1, z0, 1),
+ z0 = svextq (z1, z0, 1))
+
+/*
+** extq_1_u32_untied:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #4
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_u32_untied, svuint32_t,
+ z0 = svextq_u32 (z1, z2, 1),
+ z0 = svextq (z1, z2, 1))
+
+/*
+** extq_0_u32:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (extq_0_u32, svuint32_t,
+ z0 = svextq_u32 (z1, z2, 0),
+ z0 = svextq (z1, z2, 0))
+
+/*
+** extq_2_u32:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #8
+** ret
+*/
+TEST_UNIFORM_Z (extq_2_u32, svuint32_t,
+ z0 = svextq_u32 (z1, z2, 2),
+ z0 = svextq (z1, z2, 2))
+
+/*
+** extq_3_u32:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #12
+** ret
+*/
+TEST_UNIFORM_Z (extq_3_u32, svuint32_t,
+ z0 = svextq_u32 (z1, z2, 3),
+ z0 = svextq (z1, z2, 3))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u64.c
new file mode 100644
index 0000000..fa720b1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** extq_1_u64_tied1:
+** extq z0\.b, z0\.b, z1\.b, #8
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_u64_tied1, svuint64_t,
+ z0 = svextq_u64 (z0, z1, 1),
+ z0 = svextq (z0, z1, 1))
+
+/*
+** extq_1_u64_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** extq z0\.b, z0\.b, \1\.b, #8
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_u64_tied2, svuint64_t,
+ z0 = svextq_u64 (z1, z0, 1),
+ z0 = svextq (z1, z0, 1))
+
+/*
+** extq_1_u64_untied:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #8
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_u64_untied, svuint64_t,
+ z0 = svextq_u64 (z1, z2, 1),
+ z0 = svextq (z1, z2, 1))
+
+/*
+** extq_0_u64:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (extq_0_u64, svuint64_t,
+ z0 = svextq_u64 (z1, z2, 0),
+ z0 = svextq (z1, z2, 0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u8.c
new file mode 100644
index 0000000..a7dcfc36
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/extq_u8.c
@@ -0,0 +1,77 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** extq_1_u8_tied1:
+** extq z0\.b, z0\.b, z1\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_u8_tied1, svuint8_t,
+ z0 = svextq_u8 (z0, z1, 1),
+ z0 = svextq (z0, z1, 1))
+
+/*
+** extq_1_u8_tied2:
+** mov (z[0-9]+)\.d, z0\.d
+** movprfx z0, z1
+** extq z0\.b, z0\.b, \1\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_u8_tied2, svuint8_t,
+ z0 = svextq_u8 (z1, z0, 1),
+ z0 = svextq (z1, z0, 1))
+
+/*
+** extq_1_u8_untied:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #1
+** ret
+*/
+TEST_UNIFORM_Z (extq_1_u8_untied, svuint8_t,
+ z0 = svextq_u8 (z1, z2, 1),
+ z0 = svextq (z1, z2, 1))
+
+/*
+** extq_0_u8:
+** mov z0\.d, z1\.d
+** ret
+*/
+TEST_UNIFORM_Z (extq_0_u8, svuint8_t,
+ z0 = svextq_u8 (z1, z2, 0),
+ z0 = svextq (z1, z2, 0))
+
+/*
+** extq_2_u8:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #2
+** ret
+*/
+TEST_UNIFORM_Z (extq_2_u8, svuint8_t,
+ z0 = svextq_u8 (z1, z2, 2),
+ z0 = svextq (z1, z2, 2))
+
+/*
+** extq_3_u8:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #3
+** ret
+*/
+TEST_UNIFORM_Z (extq_3_u8, svuint8_t,
+ z0 = svextq_u8 (z1, z2, 3),
+ z0 = svextq (z1, z2, 3))
+
+/*
+** extq_15_u8:
+** movprfx z0, z1
+** extq z0\.b, z0\.b, z2\.b, #15
+** ret
+*/
+TEST_UNIFORM_Z (extq_15_u8, svuint8_t,
+ z0 = svextq_u8 (z1, z2, 15),
+ z0 = svextq (z1, z2, 15))
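The extq_* tests above all map the element index passed to svextq onto a byte immediate for the EXTQ instruction (index times element size: index 1 becomes #1 for .b, #4 for .s and #8 for .d elements).  A minimal standalone sketch of the intrinsic as exercised by extq_1_u32_tied1, assuming an SVE2p1-capable toolchain and the ACLE header; it is illustrative only and not part of the testsuite:

#include <arm_sve.h>

#pragma GCC target "+sve2p1"

svuint32_t
ext_one_element (svuint32_t a, svuint32_t b)
{
  /* Expected to assemble to "extq z0.b, z0.b, z1.b, #4", as checked by
     extq_1_u32_tied1 above: the element index 1 is scaled by the 4-byte
     element size.  */
  return svextq_u32 (a, b, 1);
}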
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_bf16.c
new file mode 100644
index 0000000..e11da4a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_bf16.c
@@ -0,0 +1,179 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1q_gather_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_tied, svbfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_bf16 (p0, z0),
+ z0_res = svld1q_gather_bf16 (p0, z0))
+
+/*
+** ld1q_gather_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_untied, svbfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_bf16 (p0, z1),
+ z0_res = svld1q_gather_bf16 (p0, z1))
+
+/*
+** ld1q_gather_x0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset, svbfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_bf16 (p0, z0, x0),
+ z0_res = svld1q_gather_offset_bf16 (p0, z0, x0))
+
+/*
+** ld1q_gather_m2_offset:
+** mov (x[0-9]+), #?-2
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m2_offset, svbfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_bf16 (p0, z0, -2),
+ z0_res = svld1q_gather_offset_bf16 (p0, z0, -2))
+
+/*
+** ld1q_gather_0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_offset, svbfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_bf16 (p0, z0, 0),
+ z0_res = svld1q_gather_offset_bf16 (p0, z0, 0))
+
+/*
+** ld1q_gather_6_offset:
+** mov (x[0-9]+), #?6
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_6_offset, svbfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_bf16 (p0, z0, 6),
+ z0_res = svld1q_gather_offset_bf16 (p0, z0, 6))
+
+/*
+** ld1q_gather_x0_index:
+** lsl (x[0-9]+), x0, #?1
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_index, svbfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_bf16 (p0, z0, x0),
+ z0_res = svld1q_gather_index_bf16 (p0, z0, x0))
+
+/*
+** ld1q_gather_m1_index:
+** mov (x[0-9]+), #?-2
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m1_index, svbfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_bf16 (p0, z0, -1),
+ z0_res = svld1q_gather_index_bf16 (p0, z0, -1))
+
+/*
+** ld1q_gather_0_index:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_index, svbfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_bf16 (p0, z0, 0),
+ z0_res = svld1q_gather_index_bf16 (p0, z0, 0))
+
+/*
+** ld1q_gather_5_index:
+** mov (x[0-9]+), #?10
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_5_index, svbfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_bf16 (p0, z0, 5),
+ z0_res = svld1q_gather_index_bf16 (p0, z0, 5))
+
+/*
+** ld1q_gather_s64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_tied, svbfloat16_t, bfloat16_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_bf16 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_s64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_untied, svbfloat16_t, bfloat16_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_bf16 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_u64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_tied, svbfloat16_t, bfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_bf16 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_u64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_untied, svbfloat16_t, bfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_bf16 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_s64index_tied: { xfail *-*-* }
+** add (z[1-9][0-9]*\.d), z0\.d, z0\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_tied, svbfloat16_t, bfloat16_t, svint64_t,
+ z0_res = svld1q_gather_s64index_bf16 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_s64index_untied:
+** add (z[0-9]+\.d), z1\.d, z1\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_untied, svbfloat16_t, bfloat16_t, svint64_t,
+ z0_res = svld1q_gather_s64index_bf16 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
+
+/*
+** ld1q_gather_u64index_tied: { xfail *-*-* }
+** add (z[1-9][0-9]*\.d), z0\.d, z0\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_tied, svbfloat16_t, bfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_bf16 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_u64index_untied:
+** add (z[0-9]+\.d), z1\.d, z1\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_untied, svbfloat16_t, bfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_bf16 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f16.c
new file mode 100644
index 0000000..d148312
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f16.c
@@ -0,0 +1,179 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1q_gather_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_tied, svfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_f16 (p0, z0),
+ z0_res = svld1q_gather_f16 (p0, z0))
+
+/*
+** ld1q_gather_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_untied, svfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_f16 (p0, z1),
+ z0_res = svld1q_gather_f16 (p0, z1))
+
+/*
+** ld1q_gather_x0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset, svfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_f16 (p0, z0, x0),
+ z0_res = svld1q_gather_offset_f16 (p0, z0, x0))
+
+/*
+** ld1q_gather_m2_offset:
+** mov (x[0-9]+), #?-2
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m2_offset, svfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_f16 (p0, z0, -2),
+ z0_res = svld1q_gather_offset_f16 (p0, z0, -2))
+
+/*
+** ld1q_gather_0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_offset, svfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_f16 (p0, z0, 0),
+ z0_res = svld1q_gather_offset_f16 (p0, z0, 0))
+
+/*
+** ld1q_gather_6_offset:
+** mov (x[0-9]+), #?6
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_6_offset, svfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_f16 (p0, z0, 6),
+ z0_res = svld1q_gather_offset_f16 (p0, z0, 6))
+
+/*
+** ld1q_gather_x0_index:
+** lsl (x[0-9]+), x0, #?1
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_index, svfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_f16 (p0, z0, x0),
+ z0_res = svld1q_gather_index_f16 (p0, z0, x0))
+
+/*
+** ld1q_gather_m1_index:
+** mov (x[0-9]+), #?-2
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m1_index, svfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_f16 (p0, z0, -1),
+ z0_res = svld1q_gather_index_f16 (p0, z0, -1))
+
+/*
+** ld1q_gather_0_index:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_index, svfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_f16 (p0, z0, 0),
+ z0_res = svld1q_gather_index_f16 (p0, z0, 0))
+
+/*
+** ld1q_gather_5_index:
+** mov (x[0-9]+), #?10
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_5_index, svfloat16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_f16 (p0, z0, 5),
+ z0_res = svld1q_gather_index_f16 (p0, z0, 5))
+
+/*
+** ld1q_gather_s64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_tied, svfloat16_t, float16_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_f16 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_s64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_untied, svfloat16_t, float16_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_f16 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_u64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_tied, svfloat16_t, float16_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_f16 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_u64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_untied, svfloat16_t, float16_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_f16 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_s64index_tied: { xfail *-*-* }
+** add (z[1-9][0-9]*\.d), z0\.d, z0\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_tied, svfloat16_t, float16_t, svint64_t,
+ z0_res = svld1q_gather_s64index_f16 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_s64index_untied:
+** add (z[0-9]+\.d), z1\.d, z1\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_untied, svfloat16_t, float16_t, svint64_t,
+ z0_res = svld1q_gather_s64index_f16 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
+
+/*
+** ld1q_gather_u64index_tied: { xfail *-*-* }
+** add (z[1-9][0-9]*\.d), z0\.d, z0\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_tied, svfloat16_t, float16_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_f16 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_u64index_untied:
+** add (z[0-9]+\.d), z1\.d, z1\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_untied, svfloat16_t, float16_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_f16 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f32.c
new file mode 100644
index 0000000..54f57c6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f32.c
@@ -0,0 +1,179 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1q_gather_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_tied, svfloat32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_f32 (p0, z0),
+ z0_res = svld1q_gather_f32 (p0, z0))
+
+/*
+** ld1q_gather_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_untied, svfloat32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_f32 (p0, z1),
+ z0_res = svld1q_gather_f32 (p0, z1))
+
+/*
+** ld1q_gather_x0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset, svfloat32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_f32 (p0, z0, x0),
+ z0_res = svld1q_gather_offset_f32 (p0, z0, x0))
+
+/*
+** ld1q_gather_m4_offset:
+** mov (x[0-9]+), #?-4
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m4_offset, svfloat32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_f32 (p0, z0, -4),
+ z0_res = svld1q_gather_offset_f32 (p0, z0, -4))
+
+/*
+** ld1q_gather_0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_offset, svfloat32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_f32 (p0, z0, 0),
+ z0_res = svld1q_gather_offset_f32 (p0, z0, 0))
+
+/*
+** ld1q_gather_7_offset:
+** mov (x[0-9]+), #?7
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_7_offset, svfloat32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_f32 (p0, z0, 7),
+ z0_res = svld1q_gather_offset_f32 (p0, z0, 7))
+
+/*
+** ld1q_gather_x0_index:
+** lsl (x[0-9]+), x0, #?2
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_index, svfloat32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_f32 (p0, z0, x0),
+ z0_res = svld1q_gather_index_f32 (p0, z0, x0))
+
+/*
+** ld1q_gather_m1_index:
+** mov (x[0-9]+), #?-4
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m1_index, svfloat32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_f32 (p0, z0, -1),
+ z0_res = svld1q_gather_index_f32 (p0, z0, -1))
+
+/*
+** ld1q_gather_0_index:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_index, svfloat32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_f32 (p0, z0, 0),
+ z0_res = svld1q_gather_index_f32 (p0, z0, 0))
+
+/*
+** ld1q_gather_5_index:
+** mov (x[0-9]+), #?20
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_5_index, svfloat32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_f32 (p0, z0, 5),
+ z0_res = svld1q_gather_index_f32 (p0, z0, 5))
+
+/*
+** ld1q_gather_s64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_tied, svfloat32_t, float32_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_f32 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_s64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_untied, svfloat32_t, float32_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_f32 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_u64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_tied, svfloat32_t, float32_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_f32 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_u64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_untied, svfloat32_t, float32_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_f32 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_s64index_tied: { xfail *-*-* }
+** lsl (z[1-9][0-9]*\.d), z0\.d, #2
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_tied, svfloat32_t, float32_t, svint64_t,
+ z0_res = svld1q_gather_s64index_f32 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_s64index_untied:
+** lsl (z[0-9]+\.d), z1\.d, #2
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_untied, svfloat32_t, float32_t, svint64_t,
+ z0_res = svld1q_gather_s64index_f32 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
+
+/*
+** ld1q_gather_u64index_tied: { xfail *-*-* }
+** lsl (z[1-9][0-9]*\.d), z0\.d, #2
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_tied, svfloat32_t, float32_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_f32 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_u64index_untied:
+** lsl (z[0-9]+\.d), z1\.d, #2
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_untied, svfloat32_t, float32_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_f32 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f64.c
new file mode 100644
index 0000000..c52bcc2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_f64.c
@@ -0,0 +1,179 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1q_gather_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_tied, svfloat64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_f64 (p0, z0),
+ z0_res = svld1q_gather_f64 (p0, z0))
+
+/*
+** ld1q_gather_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_untied, svfloat64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_f64 (p0, z1),
+ z0_res = svld1q_gather_f64 (p0, z1))
+
+/*
+** ld1q_gather_x0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset, svfloat64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_f64 (p0, z0, x0),
+ z0_res = svld1q_gather_offset_f64 (p0, z0, x0))
+
+/*
+** ld1q_gather_m8_offset:
+** mov (x[0-9]+), #?-8
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m8_offset, svfloat64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_f64 (p0, z0, -8),
+ z0_res = svld1q_gather_offset_f64 (p0, z0, -8))
+
+/*
+** ld1q_gather_0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_offset, svfloat64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_f64 (p0, z0, 0),
+ z0_res = svld1q_gather_offset_f64 (p0, z0, 0))
+
+/*
+** ld1q_gather_5_offset:
+** mov (x[0-9]+), #?5
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_5_offset, svfloat64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_f64 (p0, z0, 5),
+ z0_res = svld1q_gather_offset_f64 (p0, z0, 5))
+
+/*
+** ld1q_gather_x0_index:
+** lsl (x[0-9]+), x0, #?3
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_index, svfloat64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_f64 (p0, z0, x0),
+ z0_res = svld1q_gather_index_f64 (p0, z0, x0))
+
+/*
+** ld1q_gather_m1_index:
+** mov (x[0-9]+), #?-8
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m1_index, svfloat64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_f64 (p0, z0, -1),
+ z0_res = svld1q_gather_index_f64 (p0, z0, -1))
+
+/*
+** ld1q_gather_0_index:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_index, svfloat64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_f64 (p0, z0, 0),
+ z0_res = svld1q_gather_index_f64 (p0, z0, 0))
+
+/*
+** ld1q_gather_5_index:
+** mov (x[0-9]+), #?40
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_5_index, svfloat64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_f64 (p0, z0, 5),
+ z0_res = svld1q_gather_index_f64 (p0, z0, 5))
+
+/*
+** ld1q_gather_s64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_tied, svfloat64_t, float64_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_f64 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_s64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_untied, svfloat64_t, float64_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_f64 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_u64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_tied, svfloat64_t, float64_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_f64 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_u64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_untied, svfloat64_t, float64_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_f64 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_s64index_tied: { xfail *-*-* }
+** lsl (z[1-9][0-9]*\.d), z0\.d, #3
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_tied, svfloat64_t, float64_t, svint64_t,
+ z0_res = svld1q_gather_s64index_f64 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_s64index_untied:
+** lsl (z[0-9]+\.d), z1\.d, #3
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_untied, svfloat64_t, float64_t, svint64_t,
+ z0_res = svld1q_gather_s64index_f64 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
+
+/*
+** ld1q_gather_u64index_tied: { xfail *-*-* }
+** lsl (z[1-9][0-9]*\.d), z0\.d, #3
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_tied, svfloat64_t, float64_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_f64 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_u64index_untied:
+** lsl (z[0-9]+\.d), z1\.d, #3
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_untied, svfloat64_t, float64_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_f64 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
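The ld1q_gather_* tests pair the vector-base forms (u64base, optionally with a scalar offset or index) with the scalar-base forms that take a vector of signed or unsigned 64-bit offsets or indices; index forms are scaled to byte offsets before the LD1Q (an lsl #3 for .d elements in the file above).  A small sketch of the scalar-base, vector-offset form from ld1q_gather_u64offset_tied, assuming the same +sve2p1 setup; again illustrative rather than part of the testsuite:

#include <arm_sve.h>

#pragma GCC target "+sve2p1"

svfloat64_t
gather_quads (svbool_t pg, const float64_t *base, svuint64_t byte_offsets)
{
  /* Expected to assemble to "ld1q {z0.q}, p0/z, [z0.d, x0]", as checked by
     ld1q_gather_u64offset_tied above: each active .d lane supplies a byte
     offset that is added to the scalar base to load one 128-bit quadword.  */
  return svld1q_gather_u64offset_f64 (pg, base, byte_offsets);
}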
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s16.c
new file mode 100644
index 0000000..84c5991
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s16.c
@@ -0,0 +1,179 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1q_gather_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_tied, svint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_s16 (p0, z0),
+ z0_res = svld1q_gather_s16 (p0, z0))
+
+/*
+** ld1q_gather_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_untied, svint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_s16 (p0, z1),
+ z0_res = svld1q_gather_s16 (p0, z1))
+
+/*
+** ld1q_gather_x0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset, svint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s16 (p0, z0, x0),
+ z0_res = svld1q_gather_offset_s16 (p0, z0, x0))
+
+/*
+** ld1q_gather_m2_offset:
+** mov (x[0-9]+), #?-2
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m2_offset, svint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s16 (p0, z0, -2),
+ z0_res = svld1q_gather_offset_s16 (p0, z0, -2))
+
+/*
+** ld1q_gather_0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_offset, svint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s16 (p0, z0, 0),
+ z0_res = svld1q_gather_offset_s16 (p0, z0, 0))
+
+/*
+** ld1q_gather_6_offset:
+** mov (x[0-9]+), #?6
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_6_offset, svint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s16 (p0, z0, 6),
+ z0_res = svld1q_gather_offset_s16 (p0, z0, 6))
+
+/*
+** ld1q_gather_x0_index:
+** lsl (x[0-9]+), x0, #?1
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_index, svint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_s16 (p0, z0, x0),
+ z0_res = svld1q_gather_index_s16 (p0, z0, x0))
+
+/*
+** ld1q_gather_m1_index:
+** mov (x[0-9]+), #?-2
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m1_index, svint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_s16 (p0, z0, -1),
+ z0_res = svld1q_gather_index_s16 (p0, z0, -1))
+
+/*
+** ld1q_gather_0_index:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_index, svint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_s16 (p0, z0, 0),
+ z0_res = svld1q_gather_index_s16 (p0, z0, 0))
+
+/*
+** ld1q_gather_5_index:
+** mov (x[0-9]+), #?10
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_5_index, svint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_s16 (p0, z0, 5),
+ z0_res = svld1q_gather_index_s16 (p0, z0, 5))
+
+/*
+** ld1q_gather_s64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_tied, svint16_t, int16_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_s16 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_s64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_untied, svint16_t, int16_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_s16 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_u64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_tied, svint16_t, int16_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_s16 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_u64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_untied, svint16_t, int16_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_s16 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_s64index_tied: { xfail *-*-* }
+** add (z[1-9][0-9]*\.d), z0\.d, z0\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_tied, svint16_t, int16_t, svint64_t,
+ z0_res = svld1q_gather_s64index_s16 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_s64index_untied:
+** add (z[0-9]+\.d), z1\.d, z1\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_untied, svint16_t, int16_t, svint64_t,
+ z0_res = svld1q_gather_s64index_s16 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
+
+/*
+** ld1q_gather_u64index_tied: { xfail *-*-* }
+** add (z[1-9][0-9]*\.d), z0\.d, z0\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_tied, svint16_t, int16_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_s16 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_u64index_untied:
+** add (z[0-9]+\.d), z1\.d, z1\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_untied, svint16_t, int16_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_s16 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s32.c
new file mode 100644
index 0000000..33d78c3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s32.c
@@ -0,0 +1,179 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1q_gather_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_tied, svint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_s32 (p0, z0),
+ z0_res = svld1q_gather_s32 (p0, z0))
+
+/*
+** ld1q_gather_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_untied, svint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_s32 (p0, z1),
+ z0_res = svld1q_gather_s32 (p0, z1))
+
+/*
+** ld1q_gather_x0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset, svint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s32 (p0, z0, x0),
+ z0_res = svld1q_gather_offset_s32 (p0, z0, x0))
+
+/*
+** ld1q_gather_m4_offset:
+** mov (x[0-9]+), #?-4
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m4_offset, svint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s32 (p0, z0, -4),
+ z0_res = svld1q_gather_offset_s32 (p0, z0, -4))
+
+/*
+** ld1q_gather_0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_offset, svint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s32 (p0, z0, 0),
+ z0_res = svld1q_gather_offset_s32 (p0, z0, 0))
+
+/*
+** ld1q_gather_7_offset:
+** mov (x[0-9]+), #?7
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_7_offset, svint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s32 (p0, z0, 7),
+ z0_res = svld1q_gather_offset_s32 (p0, z0, 7))
+
+/*
+** ld1q_gather_x0_index:
+** lsl (x[0-9]+), x0, #?2
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_index, svint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_s32 (p0, z0, x0),
+ z0_res = svld1q_gather_index_s32 (p0, z0, x0))
+
+/*
+** ld1q_gather_m1_index:
+** mov (x[0-9]+), #?-4
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m1_index, svint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_s32 (p0, z0, -1),
+ z0_res = svld1q_gather_index_s32 (p0, z0, -1))
+
+/*
+** ld1q_gather_0_index:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_index, svint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_s32 (p0, z0, 0),
+ z0_res = svld1q_gather_index_s32 (p0, z0, 0))
+
+/*
+** ld1q_gather_5_index:
+** mov (x[0-9]+), #?20
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_5_index, svint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_s32 (p0, z0, 5),
+ z0_res = svld1q_gather_index_s32 (p0, z0, 5))
+
+/*
+** ld1q_gather_s64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_tied, svint32_t, int32_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_s32 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_s64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_untied, svint32_t, int32_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_s32 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_u64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_tied, svint32_t, int32_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_s32 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_u64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_untied, svint32_t, int32_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_s32 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_s64index_tied: { xfail *-*-* }
+** lsl (z[1-9][0-9]*\.d), z0\.d, #2
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_tied, svint32_t, int32_t, svint64_t,
+ z0_res = svld1q_gather_s64index_s32 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_s64index_untied:
+** lsl (z[0-9]+\.d), z1\.d, #2
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_untied, svint32_t, int32_t, svint64_t,
+ z0_res = svld1q_gather_s64index_s32 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
+
+/*
+** ld1q_gather_u64index_tied: { xfail *-*-* }
+** lsl (z[1-9][0-9]*\.d), z0\.d, #2
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_tied, svint32_t, int32_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_s32 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_u64index_untied:
+** lsl (z[0-9]+\.d), z1\.d, #2
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_untied, svint32_t, int32_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_s32 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s64.c
new file mode 100644
index 0000000..f534867
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s64.c
@@ -0,0 +1,179 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1q_gather_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_tied, svint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_s64 (p0, z0),
+ z0_res = svld1q_gather_s64 (p0, z0))
+
+/*
+** ld1q_gather_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_untied, svint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_s64 (p0, z1),
+ z0_res = svld1q_gather_s64 (p0, z1))
+
+/*
+** ld1q_gather_x0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset, svint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s64 (p0, z0, x0),
+ z0_res = svld1q_gather_offset_s64 (p0, z0, x0))
+
+/*
+** ld1q_gather_m8_offset:
+** mov (x[0-9]+), #?-8
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m8_offset, svint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s64 (p0, z0, -8),
+ z0_res = svld1q_gather_offset_s64 (p0, z0, -8))
+
+/*
+** ld1q_gather_0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_offset, svint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s64 (p0, z0, 0),
+ z0_res = svld1q_gather_offset_s64 (p0, z0, 0))
+
+/*
+** ld1q_gather_5_offset:
+** mov (x[0-9]+), #?5
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_5_offset, svint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s64 (p0, z0, 5),
+ z0_res = svld1q_gather_offset_s64 (p0, z0, 5))
+
+/*
+** ld1q_gather_x0_index:
+** lsl (x[0-9]+), x0, #?3
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_index, svint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_s64 (p0, z0, x0),
+ z0_res = svld1q_gather_index_s64 (p0, z0, x0))
+
+/*
+** ld1q_gather_m1_index:
+** mov (x[0-9]+), #?-8
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m1_index, svint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_s64 (p0, z0, -1),
+ z0_res = svld1q_gather_index_s64 (p0, z0, -1))
+
+/*
+** ld1q_gather_0_index:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_index, svint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_s64 (p0, z0, 0),
+ z0_res = svld1q_gather_index_s64 (p0, z0, 0))
+
+/*
+** ld1q_gather_5_index:
+** mov (x[0-9]+), #?40
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_5_index, svint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_s64 (p0, z0, 5),
+ z0_res = svld1q_gather_index_s64 (p0, z0, 5))
+
+/*
+** ld1q_gather_s64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_tied, svint64_t, int64_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_s64 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_s64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_untied, svint64_t, int64_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_s64 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_u64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_tied, svint64_t, int64_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_s64 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_u64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_untied, svint64_t, int64_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_s64 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_s64index_tied: { xfail *-*-* }
+** lsl (z[1-9][0-9]*\.d), z0\.d, #3
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_tied, svint64_t, int64_t, svint64_t,
+ z0_res = svld1q_gather_s64index_s64 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_s64index_untied:
+** lsl (z[0-9]+\.d), z1\.d, #3
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_untied, svint64_t, int64_t, svint64_t,
+ z0_res = svld1q_gather_s64index_s64 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
+
+/*
+** ld1q_gather_u64index_tied: { xfail *-*-* }
+** lsl (z[1-9][0-9]*\.d), z0\.d, #3
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_tied, svint64_t, int64_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_s64 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_u64index_untied:
+** lsl (z[0-9]+\.d), z1\.d, #3
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_untied, svint64_t, int64_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_s64 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s8.c
new file mode 100644
index 0000000..080eda3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_s8.c
@@ -0,0 +1,109 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1q_gather_tied1:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_tied1, svint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_s8 (p0, z0),
+ z0_res = svld1q_gather_s8 (p0, z0))
+
+/*
+** ld1q_gather_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_untied, svint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_s8 (p0, z1),
+ z0_res = svld1q_gather_s8 (p0, z1))
+
+/*
+** ld1q_gather_x0_offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset_tied, svint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s8 (p0, z0, x0),
+ z0_res = svld1q_gather_offset_s8 (p0, z0, x0))
+
+/*
+** ld1q_gather_x0_offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset_untied, svint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s8 (p0, z1, x0),
+ z0_res = svld1q_gather_offset_s8 (p0, z1, x0))
+
+/*
+** ld1q_gather_m16_offset:
+** mov (x[0-9]+), #?-16
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m16_offset, svint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s8 (p0, z0, -16),
+ z0_res = svld1q_gather_offset_s8 (p0, z0, -16))
+
+/*
+** ld1q_gather_0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_offset, svint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s8 (p0, z0, 0),
+ z0_res = svld1q_gather_offset_s8 (p0, z0, 0))
+
+/*
+** ld1q_gather_16_offset:
+** mov (x[0-9]+), #?16
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_16_offset, svint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_s8 (p0, z0, 16),
+ z0_res = svld1q_gather_offset_s8 (p0, z0, 16))
+
+/*
+** ld1q_gather_s64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_tied, svint8_t, int8_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_s8 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_s64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_untied, svint8_t, int8_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_s8 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_u64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_tied, svint8_t, int8_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_s8 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_u64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_untied, svint8_t, int8_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_s8 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u16.c
new file mode 100644
index 0000000..4d0c6de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u16.c
@@ -0,0 +1,179 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1q_gather_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_tied, svuint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_u16 (p0, z0),
+ z0_res = svld1q_gather_u16 (p0, z0))
+
+/*
+** ld1q_gather_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_untied, svuint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_u16 (p0, z1),
+ z0_res = svld1q_gather_u16 (p0, z1))
+
+/*
+** ld1q_gather_x0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset, svuint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u16 (p0, z0, x0),
+ z0_res = svld1q_gather_offset_u16 (p0, z0, x0))
+
+/*
+** ld1q_gather_m2_offset:
+** mov (x[0-9]+), #?-2
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m2_offset, svuint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u16 (p0, z0, -2),
+ z0_res = svld1q_gather_offset_u16 (p0, z0, -2))
+
+/*
+** ld1q_gather_0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_offset, svuint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u16 (p0, z0, 0),
+ z0_res = svld1q_gather_offset_u16 (p0, z0, 0))
+
+/*
+** ld1q_gather_6_offset:
+** mov (x[0-9]+), #?6
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_6_offset, svuint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u16 (p0, z0, 6),
+ z0_res = svld1q_gather_offset_u16 (p0, z0, 6))
+
+/*
+** ld1q_gather_x0_index:
+** lsl (x[0-9]+), x0, #?1
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_index, svuint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_u16 (p0, z0, x0),
+ z0_res = svld1q_gather_index_u16 (p0, z0, x0))
+
+/*
+** ld1q_gather_m1_index:
+** mov (x[0-9]+), #?-2
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m1_index, svuint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_u16 (p0, z0, -1),
+ z0_res = svld1q_gather_index_u16 (p0, z0, -1))
+
+/*
+** ld1q_gather_0_index:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_index, svuint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_u16 (p0, z0, 0),
+ z0_res = svld1q_gather_index_u16 (p0, z0, 0))
+
+/*
+** ld1q_gather_5_index:
+** mov (x[0-9]+), #?10
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_5_index, svuint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_u16 (p0, z0, 5),
+ z0_res = svld1q_gather_index_u16 (p0, z0, 5))
+
+/*
+** ld1q_gather_s64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_tied, svuint16_t, uint16_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_u16 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_s64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_untied, svuint16_t, uint16_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_u16 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_u64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_tied, svuint16_t, uint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_u16 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_u64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_untied, svuint16_t, uint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_u16 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_s64index_tied: { xfail *-*-* }
+** add (z[1-9][0-9]*\.d), z0\.d, z0\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_tied, svuint16_t, uint16_t, svint64_t,
+ z0_res = svld1q_gather_s64index_u16 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_s64index_untied:
+** add (z[0-9]+\.d), z1\.d, z1\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_untied, svuint16_t, uint16_t, svint64_t,
+ z0_res = svld1q_gather_s64index_u16 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
+
+/*
+** ld1q_gather_u64index_tied: { xfail *-*-* }
+** add (z[1-9][0-9]*\.d), z0\.d, z0\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_tied, svuint16_t, uint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_u16 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_u64index_untied:
+** add (z[0-9]+\.d), z1\.d, z1\.d
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_untied, svuint16_t, uint16_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_u16 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u32.c
new file mode 100644
index 0000000..3ba15ff
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u32.c
@@ -0,0 +1,179 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1q_gather_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_tied, svuint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_u32 (p0, z0),
+ z0_res = svld1q_gather_u32 (p0, z0))
+
+/*
+** ld1q_gather_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_untied, svuint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_u32 (p0, z1),
+ z0_res = svld1q_gather_u32 (p0, z1))
+
+/*
+** ld1q_gather_x0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset, svuint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u32 (p0, z0, x0),
+ z0_res = svld1q_gather_offset_u32 (p0, z0, x0))
+
+/*
+** ld1q_gather_m4_offset:
+** mov (x[0-9]+), #?-4
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m4_offset, svuint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u32 (p0, z0, -4),
+ z0_res = svld1q_gather_offset_u32 (p0, z0, -4))
+
+/*
+** ld1q_gather_0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_offset, svuint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u32 (p0, z0, 0),
+ z0_res = svld1q_gather_offset_u32 (p0, z0, 0))
+
+/*
+** ld1q_gather_7_offset:
+** mov (x[0-9]+), #?7
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_7_offset, svuint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u32 (p0, z0, 7),
+ z0_res = svld1q_gather_offset_u32 (p0, z0, 7))
+
+/*
+** ld1q_gather_x0_index:
+** lsl (x[0-9]+), x0, #?2
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_index, svuint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_u32 (p0, z0, x0),
+ z0_res = svld1q_gather_index_u32 (p0, z0, x0))
+
+/*
+** ld1q_gather_m1_index:
+** mov (x[0-9]+), #?-4
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m1_index, svuint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_u32 (p0, z0, -1),
+ z0_res = svld1q_gather_index_u32 (p0, z0, -1))
+
+/*
+** ld1q_gather_0_index:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_index, svuint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_u32 (p0, z0, 0),
+ z0_res = svld1q_gather_index_u32 (p0, z0, 0))
+
+/*
+** ld1q_gather_5_index:
+** mov (x[0-9]+), #?20
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_5_index, svuint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_u32 (p0, z0, 5),
+ z0_res = svld1q_gather_index_u32 (p0, z0, 5))
+
+/*
+** ld1q_gather_s64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_tied, svuint32_t, uint32_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_u32 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_s64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_untied, svuint32_t, uint32_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_u32 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_u64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_tied, svuint32_t, uint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_u32 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_u64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_untied, svuint32_t, uint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_u32 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_s64index_tied: { xfail *-*-* }
+** lsl (z[1-9][0-9]*\.d), z0\.d, #2
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_tied, svuint32_t, uint32_t, svint64_t,
+ z0_res = svld1q_gather_s64index_u32 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_s64index_untied:
+** lsl (z[0-9]+\.d), z1\.d, #2
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_untied, svuint32_t, uint32_t, svint64_t,
+ z0_res = svld1q_gather_s64index_u32 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
+
+/*
+** ld1q_gather_u64index_tied: { xfail *-*-* }
+** lsl (z[1-9][0-9]*\.d), z0\.d, #2
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_tied, svuint32_t, uint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_u32 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_u64index_untied:
+** lsl (z[0-9]+\.d), z1\.d, #2
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_untied, svuint32_t, uint32_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_u32 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u64.c
new file mode 100644
index 0000000..73b79a1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u64.c
@@ -0,0 +1,179 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1q_gather_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_tied, svuint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_u64 (p0, z0),
+ z0_res = svld1q_gather_u64 (p0, z0))
+
+/*
+** ld1q_gather_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_untied, svuint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_u64 (p0, z1),
+ z0_res = svld1q_gather_u64 (p0, z1))
+
+/*
+** ld1q_gather_x0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset, svuint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u64 (p0, z0, x0),
+ z0_res = svld1q_gather_offset_u64 (p0, z0, x0))
+
+/*
+** ld1q_gather_m8_offset:
+** mov (x[0-9]+), #?-8
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m8_offset, svuint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u64 (p0, z0, -8),
+ z0_res = svld1q_gather_offset_u64 (p0, z0, -8))
+
+/*
+** ld1q_gather_0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_offset, svuint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u64 (p0, z0, 0),
+ z0_res = svld1q_gather_offset_u64 (p0, z0, 0))
+
+/*
+** ld1q_gather_5_offset:
+** mov (x[0-9]+), #?5
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_5_offset, svuint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u64 (p0, z0, 5),
+ z0_res = svld1q_gather_offset_u64 (p0, z0, 5))
+
+/*
+** ld1q_gather_x0_index:
+** lsl (x[0-9]+), x0, #?3
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_index, svuint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_u64 (p0, z0, x0),
+ z0_res = svld1q_gather_index_u64 (p0, z0, x0))
+
+/*
+** ld1q_gather_m1_index:
+** mov (x[0-9]+), #?-8
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m1_index, svuint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_u64 (p0, z0, -1),
+ z0_res = svld1q_gather_index_u64 (p0, z0, -1))
+
+/*
+** ld1q_gather_0_index:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_index, svuint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_u64 (p0, z0, 0),
+ z0_res = svld1q_gather_index_u64 (p0, z0, 0))
+
+/*
+** ld1q_gather_5_index:
+** mov (x[0-9]+), #?40
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_5_index, svuint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_index_u64 (p0, z0, 5),
+ z0_res = svld1q_gather_index_u64 (p0, z0, 5))
+
+/*
+** ld1q_gather_s64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_tied, svuint64_t, uint64_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_u64 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_s64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_untied, svuint64_t, uint64_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_u64 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_u64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_tied, svuint64_t, uint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_u64 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_u64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_untied, svuint64_t, uint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_u64 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_s64index_tied: { xfail *-*-* }
+** lsl (z[1-9][0-9]*\.d), z0\.d, #3
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_tied, svuint64_t, uint64_t, svint64_t,
+ z0_res = svld1q_gather_s64index_u64 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_s64index_untied:
+** lsl (z[0-9]+\.d), z1\.d, #3
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64index_untied, svuint64_t, uint64_t, svint64_t,
+ z0_res = svld1q_gather_s64index_u64 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
+
+/*
+** ld1q_gather_u64index_tied: { xfail *-*-* }
+** lsl (z[1-9][0-9]*\.d), z0\.d, #3
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_tied, svuint64_t, uint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_u64 (p0, x0, z0),
+ z0_res = svld1q_gather_index (p0, x0, z0))
+
+/*
+** ld1q_gather_u64index_untied:
+** lsl (z[0-9]+\.d), z1\.d, #3
+** ld1q {z0\.q}, p0/z, \[\1, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64index_untied, svuint64_t, uint64_t, svuint64_t,
+ z0_res = svld1q_gather_u64index_u64 (p0, x0, z1),
+ z0_res = svld1q_gather_index (p0, x0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u8.c
new file mode 100644
index 0000000..cb27045
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1q_gather_u8.c
@@ -0,0 +1,109 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1q_gather_tied1:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_tied1, svuint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_u8 (p0, z0),
+ z0_res = svld1q_gather_u8 (p0, z0))
+
+/*
+** ld1q_gather_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_untied, svuint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_u8 (p0, z1),
+ z0_res = svld1q_gather_u8 (p0, z1))
+
+/*
+** ld1q_gather_x0_offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset_tied, svuint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u8 (p0, z0, x0),
+ z0_res = svld1q_gather_offset_u8 (p0, z0, x0))
+
+/*
+** ld1q_gather_x0_offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_x0_offset_untied, svuint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u8 (p0, z1, x0),
+ z0_res = svld1q_gather_offset_u8 (p0, z1, x0))
+
+/*
+** ld1q_gather_m16_offset:
+** mov (x[0-9]+), #?-16
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_m16_offset, svuint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u8 (p0, z0, -16),
+ z0_res = svld1q_gather_offset_u8 (p0, z0, -16))
+
+/*
+** ld1q_gather_0_offset:
+** ld1q {z0\.q}, p0/z, \[z0\.d\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_0_offset, svuint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u8 (p0, z0, 0),
+ z0_res = svld1q_gather_offset_u8 (p0, z0, 0))
+
+/*
+** ld1q_gather_16_offset:
+** mov (x[0-9]+), #?16
+** ld1q {z0\.q}, p0/z, \[z0\.d, \1\]
+** ret
+*/
+TEST_LOAD_GATHER_ZS (ld1q_gather_16_offset, svuint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64base_offset_u8 (p0, z0, 16),
+ z0_res = svld1q_gather_offset_u8 (p0, z0, 16))
+
+/*
+** ld1q_gather_s64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_tied, svuint8_t, uint8_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_u8 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_s64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_s64offset_untied, svuint8_t, uint8_t, svint64_t,
+ z0_res = svld1q_gather_s64offset_u8 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
+
+/*
+** ld1q_gather_u64offset_tied:
+** ld1q {z0\.q}, p0/z, \[z0\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_tied, svuint8_t, uint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_u8 (p0, x0, z0),
+ z0_res = svld1q_gather_offset (p0, x0, z0))
+
+/*
+** ld1q_gather_u64offset_untied:
+** ld1q {z0\.q}, p0/z, \[z1\.d, x0\]
+** ret
+*/
+TEST_LOAD_GATHER_SZ (ld1q_gather_u64offset_untied, svuint8_t, uint8_t, svuint64_t,
+ z0_res = svld1q_gather_u64offset_u8 (p0, x0, z1),
+ z0_res = svld1q_gather_offset (p0, x0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1udq_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1udq_f64.c
new file mode 100644
index 0000000..d311f57
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1udq_f64.c
@@ -0,0 +1,163 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1udq_f64_base:
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_f64_base, svfloat64_t, float64_t,
+ z0 = svld1udq_f64 (p0, x0),
+ z0 = svld1udq (p0, x0))
+
+/*
+** ld1udq_f64_index:
+** ld1d {z0\.q}, p0/z, \[x0, x1, lsl 3\]
+** ret
+*/
+TEST_LOAD (ld1udq_f64_index, svfloat64_t, float64_t,
+ z0 = svld1udq_f64 (p0, x0 + x1),
+ z0 = svld1udq (p0, x0 + x1))
+
+/*
+** ld1udq_f64_1:
+** ld1d {z0\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_f64_1, svfloat64_t, float64_t,
+ z0 = svld1udq_f64 (p0, x0 + svcntd ()),
+ z0 = svld1udq (p0, x0 + svcntd ()))
+
+/*
+** ld1udq_f64_7:
+** ld1d {z0\.q}, p0/z, \[x0, #6, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_f64_7, svfloat64_t, float64_t,
+ z0 = svld1udq_f64 (p0, x0 + svcntd () * 3),
+ z0 = svld1udq (p0, x0 + svcntd () * 3))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1udq_f64_8:
+** incb x0, all, mul #4
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_f64_8, svfloat64_t, float64_t,
+ z0 = svld1udq_f64 (p0, x0 + svcntd () * 4),
+ z0 = svld1udq (p0, x0 + svcntd () * 4))
+
+/*
+** ld1udq_f64_m2:
+** ld1d {z0\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_f64_m2, svfloat64_t, float64_t,
+ z0 = svld1udq_f64 (p0, x0 - svcntd ()),
+ z0 = svld1udq (p0, x0 - svcntd ()))
+
+/*
+** ld1udq_f64_m8:
+** ld1d {z0\.q}, p0/z, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_f64_m8, svfloat64_t, float64_t,
+ z0 = svld1udq_f64 (p0, x0 - svcntd () * 4),
+ z0 = svld1udq (p0, x0 - svcntd () * 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1udq_f64_m10:
+** decb x0, all, mul #5
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_f64_m10, svfloat64_t, float64_t,
+ z0 = svld1udq_f64 (p0, x0 - svcntd () * 5),
+ z0 = svld1udq (p0, x0 - svcntd () * 5))
+
+/*
+** ld1udq_vnum_f64_0:
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_f64_0, svfloat64_t, float64_t,
+ z0 = svld1udq_vnum_f64 (p0, x0, 0),
+ z0 = svld1udq_vnum (p0, x0, 0))
+
+/*
+** ld1udq_vnum_f64_1:
+** ld1d {z0\.q}, p0/z, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_f64_1, svfloat64_t, float64_t,
+ z0 = svld1udq_vnum_f64 (p0, x0, 1),
+ z0 = svld1udq_vnum (p0, x0, 1))
+
+/*
+** ld1udq_vnum_f64_7:
+** ld1d {z0\.q}, p0/z, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_f64_7, svfloat64_t, float64_t,
+ z0 = svld1udq_vnum_f64 (p0, x0, 7),
+ z0 = svld1udq_vnum (p0, x0, 7))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1udq_vnum_f64_8:
+** incb x0, all, mul #4
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_f64_8, svfloat64_t, float64_t,
+ z0 = svld1udq_vnum_f64 (p0, x0, 8),
+ z0 = svld1udq_vnum (p0, x0, 8))
+
+/*
+** ld1udq_vnum_f64_m1:
+** ld1d {z0\.q}, p0/z, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_f64_m1, svfloat64_t, float64_t,
+ z0 = svld1udq_vnum_f64 (p0, x0, -1),
+ z0 = svld1udq_vnum (p0, x0, -1))
+
+/*
+** ld1udq_vnum_f64_m8:
+** ld1d {z0\.q}, p0/z, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_f64_m8, svfloat64_t, float64_t,
+ z0 = svld1udq_vnum_f64 (p0, x0, -8),
+ z0 = svld1udq_vnum (p0, x0, -8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1udq_vnum_f64_m9:
+** dech x0, all, mul #9
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_f64_m9, svfloat64_t, float64_t,
+ z0 = svld1udq_vnum_f64 (p0, x0, -9),
+ z0 = svld1udq_vnum (p0, x0, -9))
+
+/* Using MUL to calculate an index would also be OK. */
+/*
+** ld1udq_vnum_f64_x1:
+** cnth (x[0-9]+)
+** madd (x[0-9]+), (x1, \1|\1, x1), x0
+** ld1d {z0\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_f64_x1, svfloat64_t, float64_t,
+ z0 = svld1udq_vnum_f64 (p0, x0, x1),
+ z0 = svld1udq_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1udq_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1udq_s64.c
new file mode 100644
index 0000000..0779852
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1udq_s64.c
@@ -0,0 +1,163 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1udq_s64_base:
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_s64_base, svint64_t, int64_t,
+ z0 = svld1udq_s64 (p0, x0),
+ z0 = svld1udq (p0, x0))
+
+/*
+** ld1udq_s64_index:
+** ld1d {z0\.q}, p0/z, \[x0, x1, lsl 3\]
+** ret
+*/
+TEST_LOAD (ld1udq_s64_index, svint64_t, int64_t,
+ z0 = svld1udq_s64 (p0, x0 + x1),
+ z0 = svld1udq (p0, x0 + x1))
+
+/*
+** ld1udq_s64_1:
+** ld1d {z0\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_s64_1, svint64_t, int64_t,
+ z0 = svld1udq_s64 (p0, x0 + svcntd ()),
+ z0 = svld1udq (p0, x0 + svcntd ()))
+
+/*
+** ld1udq_s64_7:
+** ld1d {z0\.q}, p0/z, \[x0, #6, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_s64_7, svint64_t, int64_t,
+ z0 = svld1udq_s64 (p0, x0 + svcntd () * 3),
+ z0 = svld1udq (p0, x0 + svcntd () * 3))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1udq_s64_8:
+** incb x0, all, mul #4
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_s64_8, svint64_t, int64_t,
+ z0 = svld1udq_s64 (p0, x0 + svcntd () * 4),
+ z0 = svld1udq (p0, x0 + svcntd () * 4))
+
+/*
+** ld1udq_s64_m2:
+** ld1d {z0\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_s64_m2, svint64_t, int64_t,
+ z0 = svld1udq_s64 (p0, x0 - svcntd ()),
+ z0 = svld1udq (p0, x0 - svcntd ()))
+
+/*
+** ld1udq_s64_m8:
+** ld1d {z0\.q}, p0/z, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_s64_m8, svint64_t, int64_t,
+ z0 = svld1udq_s64 (p0, x0 - svcntd () * 4),
+ z0 = svld1udq (p0, x0 - svcntd () * 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1udq_s64_m10:
+** decb x0, all, mul #5
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_s64_m10, svint64_t, int64_t,
+ z0 = svld1udq_s64 (p0, x0 - svcntd () * 5),
+ z0 = svld1udq (p0, x0 - svcntd () * 5))
+
+/*
+** ld1udq_vnum_s64_0:
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_s64_0, svint64_t, int64_t,
+ z0 = svld1udq_vnum_s64 (p0, x0, 0),
+ z0 = svld1udq_vnum (p0, x0, 0))
+
+/*
+** ld1udq_vnum_s64_1:
+** ld1d {z0\.q}, p0/z, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_s64_1, svint64_t, int64_t,
+ z0 = svld1udq_vnum_s64 (p0, x0, 1),
+ z0 = svld1udq_vnum (p0, x0, 1))
+
+/*
+** ld1udq_vnum_s64_7:
+** ld1d {z0\.q}, p0/z, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_s64_7, svint64_t, int64_t,
+ z0 = svld1udq_vnum_s64 (p0, x0, 7),
+ z0 = svld1udq_vnum (p0, x0, 7))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1udq_vnum_s64_8:
+** incb x0, all, mul #4
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_s64_8, svint64_t, int64_t,
+ z0 = svld1udq_vnum_s64 (p0, x0, 8),
+ z0 = svld1udq_vnum (p0, x0, 8))
+
+/*
+** ld1udq_vnum_s64_m1:
+** ld1d {z0\.q}, p0/z, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_s64_m1, svint64_t, int64_t,
+ z0 = svld1udq_vnum_s64 (p0, x0, -1),
+ z0 = svld1udq_vnum (p0, x0, -1))
+
+/*
+** ld1udq_vnum_s64_m8:
+** ld1d {z0\.q}, p0/z, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_s64_m8, svint64_t, int64_t,
+ z0 = svld1udq_vnum_s64 (p0, x0, -8),
+ z0 = svld1udq_vnum (p0, x0, -8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1udq_vnum_s64_m9:
+** dech x0, all, mul #9
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_s64_m9, svint64_t, int64_t,
+ z0 = svld1udq_vnum_s64 (p0, x0, -9),
+ z0 = svld1udq_vnum (p0, x0, -9))
+
+/* Using MUL to calculate an index would also be OK. */
+/*
+** ld1udq_vnum_s64_x1:
+** cnth (x[0-9]+)
+** madd (x[0-9]+), (x1, \1|\1, x1), x0
+** ld1d {z0\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_s64_x1, svint64_t, int64_t,
+ z0 = svld1udq_vnum_s64 (p0, x0, x1),
+ z0 = svld1udq_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1udq_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1udq_u64.c
new file mode 100644
index 0000000..7473f67
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1udq_u64.c
@@ -0,0 +1,163 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1udq_u64_base:
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_u64_base, svuint64_t, uint64_t,
+ z0 = svld1udq_u64 (p0, x0),
+ z0 = svld1udq (p0, x0))
+
+/*
+** ld1udq_u64_index:
+** ld1d {z0\.q}, p0/z, \[x0, x1, lsl 3\]
+** ret
+*/
+TEST_LOAD (ld1udq_u64_index, svuint64_t, uint64_t,
+ z0 = svld1udq_u64 (p0, x0 + x1),
+ z0 = svld1udq (p0, x0 + x1))
+
+/*
+** ld1udq_u64_1:
+** ld1d {z0\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_u64_1, svuint64_t, uint64_t,
+ z0 = svld1udq_u64 (p0, x0 + svcntd ()),
+ z0 = svld1udq (p0, x0 + svcntd ()))
+
+/*
+** ld1udq_u64_7:
+** ld1d {z0\.q}, p0/z, \[x0, #6, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_u64_7, svuint64_t, uint64_t,
+ z0 = svld1udq_u64 (p0, x0 + svcntd () * 3),
+ z0 = svld1udq (p0, x0 + svcntd () * 3))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1udq_u64_8:
+** incb x0, all, mul #4
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_u64_8, svuint64_t, uint64_t,
+ z0 = svld1udq_u64 (p0, x0 + svcntd () * 4),
+ z0 = svld1udq (p0, x0 + svcntd () * 4))
+
+/*
+** ld1udq_u64_m2:
+** ld1d {z0\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_u64_m2, svuint64_t, uint64_t,
+ z0 = svld1udq_u64 (p0, x0 - svcntd ()),
+ z0 = svld1udq (p0, x0 - svcntd ()))
+
+/*
+** ld1udq_u64_m8:
+** ld1d {z0\.q}, p0/z, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_u64_m8, svuint64_t, uint64_t,
+ z0 = svld1udq_u64 (p0, x0 - svcntd () * 4),
+ z0 = svld1udq (p0, x0 - svcntd () * 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1udq_u64_m10:
+** decb x0, all, mul #5
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_u64_m10, svuint64_t, uint64_t,
+ z0 = svld1udq_u64 (p0, x0 - svcntd () * 5),
+ z0 = svld1udq (p0, x0 - svcntd () * 5))
+
+/*
+** ld1udq_vnum_u64_0:
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_u64_0, svuint64_t, uint64_t,
+ z0 = svld1udq_vnum_u64 (p0, x0, 0),
+ z0 = svld1udq_vnum (p0, x0, 0))
+
+/*
+** ld1udq_vnum_u64_1:
+** ld1d {z0\.q}, p0/z, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_u64_1, svuint64_t, uint64_t,
+ z0 = svld1udq_vnum_u64 (p0, x0, 1),
+ z0 = svld1udq_vnum (p0, x0, 1))
+
+/*
+** ld1udq_vnum_u64_7:
+** ld1d {z0\.q}, p0/z, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_u64_7, svuint64_t, uint64_t,
+ z0 = svld1udq_vnum_u64 (p0, x0, 7),
+ z0 = svld1udq_vnum (p0, x0, 7))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1udq_vnum_u64_8:
+** incb x0, all, mul #4
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_u64_8, svuint64_t, uint64_t,
+ z0 = svld1udq_vnum_u64 (p0, x0, 8),
+ z0 = svld1udq_vnum (p0, x0, 8))
+
+/*
+** ld1udq_vnum_u64_m1:
+** ld1d {z0\.q}, p0/z, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_u64_m1, svuint64_t, uint64_t,
+ z0 = svld1udq_vnum_u64 (p0, x0, -1),
+ z0 = svld1udq_vnum (p0, x0, -1))
+
+/*
+** ld1udq_vnum_u64_m8:
+** ld1d {z0\.q}, p0/z, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_u64_m8, svuint64_t, uint64_t,
+ z0 = svld1udq_vnum_u64 (p0, x0, -8),
+ z0 = svld1udq_vnum (p0, x0, -8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1udq_vnum_u64_m9:
+** dech x0, all, mul #9
+** ld1d {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_u64_m9, svuint64_t, uint64_t,
+ z0 = svld1udq_vnum_u64 (p0, x0, -9),
+ z0 = svld1udq_vnum (p0, x0, -9))
+
+/* Using MUL to calculate an index would also be OK. */
+/*
+** ld1udq_vnum_u64_x1:
+** cnth (x[0-9]+)
+** madd (x[0-9]+), (x1, \1|\1, x1), x0
+** ld1d {z0\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld1udq_vnum_u64_x1, svuint64_t, uint64_t,
+ z0 = svld1udq_vnum_u64 (p0, x0, x1),
+ z0 = svld1udq_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1uwq_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1uwq_f32.c
new file mode 100644
index 0000000..3a91fd0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1uwq_f32.c
@@ -0,0 +1,163 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1uwq_f32_base:
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_f32_base, svfloat32_t, float32_t,
+ z0 = svld1uwq_f32 (p0, x0),
+ z0 = svld1uwq (p0, x0))
+
+/*
+** ld1uwq_f32_index:
+** ld1w {z0\.q}, p0/z, \[x0, x1, lsl 2\]
+** ret
+*/
+TEST_LOAD (ld1uwq_f32_index, svfloat32_t, float32_t,
+ z0 = svld1uwq_f32 (p0, x0 + x1),
+ z0 = svld1uwq (p0, x0 + x1))
+
+/*
+** ld1uwq_f32_1:
+** ld1w {z0\.q}, p0/z, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_f32_1, svfloat32_t, float32_t,
+ z0 = svld1uwq_f32 (p0, x0 + svcntw () / 4),
+ z0 = svld1uwq (p0, x0 + svcntw () / 4))
+
+/*
+** ld1uwq_f32_7:
+** ld1w {z0\.q}, p0/z, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_f32_7, svfloat32_t, float32_t,
+ z0 = svld1uwq_f32 (p0, x0 + svcntw () * 7 / 4),
+ z0 = svld1uwq (p0, x0 + svcntw () * 7 / 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1uwq_f32_8:
+** incb x0, all, mul #2
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_f32_8, svfloat32_t, float32_t,
+ z0 = svld1uwq_f32 (p0, x0 + svcntw () * 2),
+ z0 = svld1uwq (p0, x0 + svcntw () * 2))
+
+/*
+** ld1uwq_f32_m1:
+** ld1w {z0\.q}, p0/z, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_f32_m1, svfloat32_t, float32_t,
+ z0 = svld1uwq_f32 (p0, x0 - svcntw () / 4),
+ z0 = svld1uwq (p0, x0 - svcntw () / 4))
+
+/*
+** ld1uwq_f32_m8:
+** ld1w {z0\.q}, p0/z, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_f32_m8, svfloat32_t, float32_t,
+ z0 = svld1uwq_f32 (p0, x0 - svcntw () * 2),
+ z0 = svld1uwq (p0, x0 - svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1uwq_f32_m9:
+** decw x0, all, mul #9
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_f32_m9, svfloat32_t, float32_t,
+ z0 = svld1uwq_f32 (p0, x0 - svcntw () * 9 / 4),
+ z0 = svld1uwq (p0, x0 - svcntw () * 9 / 4))
+
+/*
+** ld1uwq_vnum_f32_0:
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_f32_0, svfloat32_t, float32_t,
+ z0 = svld1uwq_vnum_f32 (p0, x0, 0),
+ z0 = svld1uwq_vnum (p0, x0, 0))
+
+/*
+** ld1uwq_vnum_f32_1:
+** ld1w {z0\.q}, p0/z, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_f32_1, svfloat32_t, float32_t,
+ z0 = svld1uwq_vnum_f32 (p0, x0, 1),
+ z0 = svld1uwq_vnum (p0, x0, 1))
+
+/*
+** ld1uwq_vnum_f32_7:
+** ld1w {z0\.q}, p0/z, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_f32_7, svfloat32_t, float32_t,
+ z0 = svld1uwq_vnum_f32 (p0, x0, 7),
+ z0 = svld1uwq_vnum (p0, x0, 7))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1uwq_vnum_f32_8:
+** incb x0, all, mul #2
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_f32_8, svfloat32_t, float32_t,
+ z0 = svld1uwq_vnum_f32 (p0, x0, 8),
+ z0 = svld1uwq_vnum (p0, x0, 8))
+
+/*
+** ld1uwq_vnum_f32_m1:
+** ld1w {z0\.q}, p0/z, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_f32_m1, svfloat32_t, float32_t,
+ z0 = svld1uwq_vnum_f32 (p0, x0, -1),
+ z0 = svld1uwq_vnum (p0, x0, -1))
+
+/*
+** ld1uwq_vnum_f32_m8:
+** ld1w {z0\.q}, p0/z, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_f32_m8, svfloat32_t, float32_t,
+ z0 = svld1uwq_vnum_f32 (p0, x0, -8),
+ z0 = svld1uwq_vnum (p0, x0, -8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1uwq_vnum_f32_m9:
+** decw x0, all, mul #9
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_f32_m9, svfloat32_t, float32_t,
+ z0 = svld1uwq_vnum_f32 (p0, x0, -9),
+ z0 = svld1uwq_vnum (p0, x0, -9))
+
+/* Using MUL to calculate an index would also be OK. */
+/*
+** ld1uwq_vnum_f32_x1:
+** cntw (x[0-9]+)
+** madd (x[0-9]+), (x1, \1|\1, x1), x0
+** ld1w {z0\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_f32_x1, svfloat32_t, float32_t,
+ z0 = svld1uwq_vnum_f32 (p0, x0, x1),
+ z0 = svld1uwq_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1uwq_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1uwq_s32.c
new file mode 100644
index 0000000..f18fada
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1uwq_s32.c
@@ -0,0 +1,163 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1uwq_s32_base:
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_s32_base, svint32_t, int32_t,
+ z0 = svld1uwq_s32 (p0, x0),
+ z0 = svld1uwq (p0, x0))
+
+/*
+** ld1uwq_s32_index:
+** ld1w {z0\.q}, p0/z, \[x0, x1, lsl 2\]
+** ret
+*/
+TEST_LOAD (ld1uwq_s32_index, svint32_t, int32_t,
+ z0 = svld1uwq_s32 (p0, x0 + x1),
+ z0 = svld1uwq (p0, x0 + x1))
+
+/*
+** ld1uwq_s32_1:
+** ld1w {z0\.q}, p0/z, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_s32_1, svint32_t, int32_t,
+ z0 = svld1uwq_s32 (p0, x0 + svcntw () / 4),
+ z0 = svld1uwq (p0, x0 + svcntw () / 4))
+
+/*
+** ld1uwq_s32_7:
+** ld1w {z0\.q}, p0/z, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_s32_7, svint32_t, int32_t,
+ z0 = svld1uwq_s32 (p0, x0 + svcntw () * 7 / 4),
+ z0 = svld1uwq (p0, x0 + svcntw () * 7 / 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1uwq_s32_8:
+** incb x0, all, mul #2
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_s32_8, svint32_t, int32_t,
+ z0 = svld1uwq_s32 (p0, x0 + svcntw () * 2),
+ z0 = svld1uwq (p0, x0 + svcntw () * 2))
+
+/*
+** ld1uwq_s32_m1:
+** ld1w {z0\.q}, p0/z, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_s32_m1, svint32_t, int32_t,
+ z0 = svld1uwq_s32 (p0, x0 - svcntw () / 4),
+ z0 = svld1uwq (p0, x0 - svcntw () / 4))
+
+/*
+** ld1uwq_s32_m8:
+** ld1w {z0\.q}, p0/z, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_s32_m8, svint32_t, int32_t,
+ z0 = svld1uwq_s32 (p0, x0 - svcntw () * 2),
+ z0 = svld1uwq (p0, x0 - svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1uwq_s32_m9:
+** decw x0, all, mul #9
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_s32_m9, svint32_t, int32_t,
+ z0 = svld1uwq_s32 (p0, x0 - svcntw () * 9 / 4),
+ z0 = svld1uwq (p0, x0 - svcntw () * 9 / 4))
+
+/*
+** ld1uwq_vnum_s32_0:
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_s32_0, svint32_t, int32_t,
+ z0 = svld1uwq_vnum_s32 (p0, x0, 0),
+ z0 = svld1uwq_vnum (p0, x0, 0))
+
+/*
+** ld1uwq_vnum_s32_1:
+** ld1w {z0\.q}, p0/z, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_s32_1, svint32_t, int32_t,
+ z0 = svld1uwq_vnum_s32 (p0, x0, 1),
+ z0 = svld1uwq_vnum (p0, x0, 1))
+
+/*
+** ld1uwq_vnum_s32_7:
+** ld1w {z0\.q}, p0/z, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_s32_7, svint32_t, int32_t,
+ z0 = svld1uwq_vnum_s32 (p0, x0, 7),
+ z0 = svld1uwq_vnum (p0, x0, 7))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1uwq_vnum_s32_8:
+** incb x0, all, mul #2
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_s32_8, svint32_t, int32_t,
+ z0 = svld1uwq_vnum_s32 (p0, x0, 8),
+ z0 = svld1uwq_vnum (p0, x0, 8))
+
+/*
+** ld1uwq_vnum_s32_m1:
+** ld1w {z0\.q}, p0/z, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_s32_m1, svint32_t, int32_t,
+ z0 = svld1uwq_vnum_s32 (p0, x0, -1),
+ z0 = svld1uwq_vnum (p0, x0, -1))
+
+/*
+** ld1uwq_vnum_s32_m8:
+** ld1w {z0\.q}, p0/z, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_s32_m8, svint32_t, int32_t,
+ z0 = svld1uwq_vnum_s32 (p0, x0, -8),
+ z0 = svld1uwq_vnum (p0, x0, -8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1uwq_vnum_s32_m9:
+** decw x0, all, mul #9
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_s32_m9, svint32_t, int32_t,
+ z0 = svld1uwq_vnum_s32 (p0, x0, -9),
+ z0 = svld1uwq_vnum (p0, x0, -9))
+
+/* Using MUL to calculate an index would also be OK. */
+/*
+** ld1uwq_vnum_s32_x1:
+** cntw (x[0-9]+)
+** madd (x[0-9]+), (x1, \1|\1, x1), x0
+** ld1w {z0\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_s32_x1, svint32_t, int32_t,
+ z0 = svld1uwq_vnum_s32 (p0, x0, x1),
+ z0 = svld1uwq_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1uwq_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1uwq_u32.c
new file mode 100644
index 0000000..068f9a4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld1uwq_u32.c
@@ -0,0 +1,163 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld1uwq_u32_base:
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_u32_base, svuint32_t, uint32_t,
+ z0 = svld1uwq_u32 (p0, x0),
+ z0 = svld1uwq (p0, x0))
+
+/*
+** ld1uwq_u32_index:
+** ld1w {z0\.q}, p0/z, \[x0, x1, lsl 2\]
+** ret
+*/
+TEST_LOAD (ld1uwq_u32_index, svuint32_t, uint32_t,
+ z0 = svld1uwq_u32 (p0, x0 + x1),
+ z0 = svld1uwq (p0, x0 + x1))
+
+/*
+** ld1uwq_u32_1:
+** ld1w {z0\.q}, p0/z, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_u32_1, svuint32_t, uint32_t,
+ z0 = svld1uwq_u32 (p0, x0 + svcntw () / 4),
+ z0 = svld1uwq (p0, x0 + svcntw () / 4))
+
+/*
+** ld1uwq_u32_7:
+** ld1w {z0\.q}, p0/z, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_u32_7, svuint32_t, uint32_t,
+ z0 = svld1uwq_u32 (p0, x0 + svcntw () * 7 / 4),
+ z0 = svld1uwq (p0, x0 + svcntw () * 7 / 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1uwq_u32_8:
+** incb x0, all, mul #2
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_u32_8, svuint32_t, uint32_t,
+ z0 = svld1uwq_u32 (p0, x0 + svcntw () * 2),
+ z0 = svld1uwq (p0, x0 + svcntw () * 2))
+
+/*
+** ld1uwq_u32_m1:
+** ld1w {z0\.q}, p0/z, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_u32_m1, svuint32_t, uint32_t,
+ z0 = svld1uwq_u32 (p0, x0 - svcntw () / 4),
+ z0 = svld1uwq (p0, x0 - svcntw () / 4))
+
+/*
+** ld1uwq_u32_m8:
+** ld1w {z0\.q}, p0/z, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_u32_m8, svuint32_t, uint32_t,
+ z0 = svld1uwq_u32 (p0, x0 - svcntw () * 2),
+ z0 = svld1uwq (p0, x0 - svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1uwq_u32_m9:
+** decw x0, all, mul #9
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_u32_m9, svuint32_t, uint32_t,
+ z0 = svld1uwq_u32 (p0, x0 - svcntw () * 9 / 4),
+ z0 = svld1uwq (p0, x0 - svcntw () * 9 / 4))
+
+/*
+** ld1uwq_vnum_u32_0:
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_u32_0, svuint32_t, uint32_t,
+ z0 = svld1uwq_vnum_u32 (p0, x0, 0),
+ z0 = svld1uwq_vnum (p0, x0, 0))
+
+/*
+** ld1uwq_vnum_u32_1:
+** ld1w {z0\.q}, p0/z, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_u32_1, svuint32_t, uint32_t,
+ z0 = svld1uwq_vnum_u32 (p0, x0, 1),
+ z0 = svld1uwq_vnum (p0, x0, 1))
+
+/*
+** ld1uwq_vnum_u32_7:
+** ld1w {z0\.q}, p0/z, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_u32_7, svuint32_t, uint32_t,
+ z0 = svld1uwq_vnum_u32 (p0, x0, 7),
+ z0 = svld1uwq_vnum (p0, x0, 7))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1uwq_vnum_u32_8:
+** incb x0, all, mul #2
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_u32_8, svuint32_t, uint32_t,
+ z0 = svld1uwq_vnum_u32 (p0, x0, 8),
+ z0 = svld1uwq_vnum (p0, x0, 8))
+
+/*
+** ld1uwq_vnum_u32_m1:
+** ld1w {z0\.q}, p0/z, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_u32_m1, svuint32_t, uint32_t,
+ z0 = svld1uwq_vnum_u32 (p0, x0, -1),
+ z0 = svld1uwq_vnum (p0, x0, -1))
+
+/*
+** ld1uwq_vnum_u32_m8:
+** ld1w {z0\.q}, p0/z, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_u32_m8, svuint32_t, uint32_t,
+ z0 = svld1uwq_vnum_u32 (p0, x0, -8),
+ z0 = svld1uwq_vnum (p0, x0, -8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld1uwq_vnum_u32_m9:
+** decw x0, all, mul #9
+** ld1w {z0\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_u32_m9, svuint32_t, uint32_t,
+ z0 = svld1uwq_vnum_u32 (p0, x0, -9),
+ z0 = svld1uwq_vnum (p0, x0, -9))
+
+/* Using MUL to calculate an index would also be OK. */
+/*
+** ld1uwq_vnum_u32_x1:
+** cntw (x[0-9]+)
+** madd (x[0-9]+), (x1, \1|\1, x1), x0
+** ld1w {z0\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld1uwq_vnum_u32_x1, svuint32_t, uint32_t,
+ z0 = svld1uwq_vnum_u32 (p0, x0, x1),
+ z0 = svld1uwq_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_bf16.c
new file mode 100644
index 0000000..f57021d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_bf16.c
@@ -0,0 +1,234 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld2q_bf16_base:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_bf16_base, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_bf16 (p0, x0),
+ z0 = svld2q (p0, x0))
+
+/*
+** ld2q_bf16_index:
+** add (x[0-9]+), x0, x1, lsl #?1
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_bf16_index, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_bf16 (p0, x0 + x1),
+ z0 = svld2q (p0, x0 + x1))
+
+/*
+** ld2q_bf16_index2:
+** add (x[0-9]+), x0, x1, lsl #?2
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_bf16_index2, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_bf16 (p0, x0 + x1 * 2),
+ z0 = svld2q (p0, x0 + x1 * 2))
+
+/*
+** ld2q_bf16_index4:
+** add (x[0-9]+), x0, x1, lsl #?3
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_bf16_index4, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_bf16 (p0, x0 + x1 * 4),
+ z0 = svld2q (p0, x0 + x1 * 4))
+
+/*
+** ld2q_bf16_index8:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld2q_bf16_index8, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_bf16 (p0, x0 + x1 * 8),
+ z0 = svld2q (p0, x0 + x1 * 8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_bf16_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_bf16_1, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_bf16 (p0, x0 + svcnth ()),
+ z0 = svld2q (p0, x0 + svcnth ()))
+
+/*
+** ld2q_bf16_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_bf16_2, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_bf16 (p0, x0 + svcnth () * 2),
+ z0 = svld2q (p0, x0 + svcnth () * 2))
+
+/*
+** ld2q_bf16_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_bf16_14, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_bf16 (p0, x0 + svcnth () * 14),
+ z0 = svld2q (p0, x0 + svcnth () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_bf16_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_bf16_16, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_bf16 (p0, x0 + svcnth () * 16),
+ z0 = svld2q (p0, x0 + svcnth () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_bf16_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_bf16_m1, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_bf16 (p0, x0 - svcnth ()),
+ z0 = svld2q (p0, x0 - svcnth ()))
+
+/*
+** ld2q_bf16_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_bf16_m2, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_bf16 (p0, x0 - svcnth () * 2),
+ z0 = svld2q (p0, x0 - svcnth () * 2))
+
+/*
+** ld2q_bf16_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_bf16_m16, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_bf16 (p0, x0 - svcnth () * 16),
+ z0 = svld2q (p0, x0 - svcnth () * 16))
+
+/*
+** ld2q_bf16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_bf16_m18, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_bf16 (p0, x0 - svcnth () * 18),
+ z0 = svld2q (p0, x0 - svcnth () * 18))
+
+/*
+** ld2q_vnum_bf16_0:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_bf16_0, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_vnum_bf16 (p0, x0, 0),
+ z0 = svld2q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_bf16_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_bf16_1, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_vnum_bf16 (p0, x0, 1),
+ z0 = svld2q_vnum (p0, x0, 1))
+
+/*
+** ld2q_vnum_bf16_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_bf16_2, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_vnum_bf16 (p0, x0, 2),
+ z0 = svld2q_vnum (p0, x0, 2))
+
+/*
+** ld2q_vnum_bf16_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_bf16_14, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_vnum_bf16 (p0, x0, 14),
+ z0 = svld2q_vnum (p0, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_bf16_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_bf16_16, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_vnum_bf16 (p0, x0, 16),
+ z0 = svld2q_vnum (p0, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_bf16_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_bf16_m1, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_vnum_bf16 (p0, x0, -1),
+ z0 = svld2q_vnum (p0, x0, -1))
+
+/*
+** ld2q_vnum_bf16_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_bf16_m2, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_vnum_bf16 (p0, x0, -2),
+ z0 = svld2q_vnum (p0, x0, -2))
+
+/*
+** ld2q_vnum_bf16_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_bf16_m16, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_vnum_bf16 (p0, x0, -16),
+ z0 = svld2q_vnum (p0, x0, -16))
+
+/*
+** ld2q_vnum_bf16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_bf16_m18, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_vnum_bf16 (p0, x0, -18),
+ z0 = svld2q_vnum (p0, x0, -18))
+
+/*
+** ld2q_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_bf16_x1, svbfloat16x2_t, bfloat16_t,
+ z0 = svld2q_vnum_bf16 (p0, x0, x1),
+ z0 = svld2q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_f16.c
new file mode 100644
index 0000000..448d36c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_f16.c
@@ -0,0 +1,234 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld2q_f16_base:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_f16_base, svfloat16x2_t, float16_t,
+ z0 = svld2q_f16 (p0, x0),
+ z0 = svld2q (p0, x0))
+
+/*
+** ld2q_f16_index:
+** add (x[0-9]+), x0, x1, lsl #?1
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_f16_index, svfloat16x2_t, float16_t,
+ z0 = svld2q_f16 (p0, x0 + x1),
+ z0 = svld2q (p0, x0 + x1))
+
+/*
+** ld2q_f16_index2:
+** add (x[0-9]+), x0, x1, lsl #?2
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_f16_index2, svfloat16x2_t, float16_t,
+ z0 = svld2q_f16 (p0, x0 + x1 * 2),
+ z0 = svld2q (p0, x0 + x1 * 2))
+
+/*
+** ld2q_f16_index4:
+** add (x[0-9]+), x0, x1, lsl #?3
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_f16_index4, svfloat16x2_t, float16_t,
+ z0 = svld2q_f16 (p0, x0 + x1 * 4),
+ z0 = svld2q (p0, x0 + x1 * 4))
+
+/*
+** ld2q_f16_index8:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld2q_f16_index8, svfloat16x2_t, float16_t,
+ z0 = svld2q_f16 (p0, x0 + x1 * 8),
+ z0 = svld2q (p0, x0 + x1 * 8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_f16_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_f16_1, svfloat16x2_t, float16_t,
+ z0 = svld2q_f16 (p0, x0 + svcnth ()),
+ z0 = svld2q (p0, x0 + svcnth ()))
+
+/*
+** ld2q_f16_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_f16_2, svfloat16x2_t, float16_t,
+ z0 = svld2q_f16 (p0, x0 + svcnth () * 2),
+ z0 = svld2q (p0, x0 + svcnth () * 2))
+
+/*
+** ld2q_f16_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_f16_14, svfloat16x2_t, float16_t,
+ z0 = svld2q_f16 (p0, x0 + svcnth () * 14),
+ z0 = svld2q (p0, x0 + svcnth () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_f16_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_f16_16, svfloat16x2_t, float16_t,
+ z0 = svld2q_f16 (p0, x0 + svcnth () * 16),
+ z0 = svld2q (p0, x0 + svcnth () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_f16_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_f16_m1, svfloat16x2_t, float16_t,
+ z0 = svld2q_f16 (p0, x0 - svcnth ()),
+ z0 = svld2q (p0, x0 - svcnth ()))
+
+/*
+** ld2q_f16_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_f16_m2, svfloat16x2_t, float16_t,
+ z0 = svld2q_f16 (p0, x0 - svcnth () * 2),
+ z0 = svld2q (p0, x0 - svcnth () * 2))
+
+/*
+** ld2q_f16_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_f16_m16, svfloat16x2_t, float16_t,
+ z0 = svld2q_f16 (p0, x0 - svcnth () * 16),
+ z0 = svld2q (p0, x0 - svcnth () * 16))
+
+/*
+** ld2q_f16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_f16_m18, svfloat16x2_t, float16_t,
+ z0 = svld2q_f16 (p0, x0 - svcnth () * 18),
+ z0 = svld2q (p0, x0 - svcnth () * 18))
+
+/*
+** ld2q_vnum_f16_0:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f16_0, svfloat16x2_t, float16_t,
+ z0 = svld2q_vnum_f16 (p0, x0, 0),
+ z0 = svld2q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_f16_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f16_1, svfloat16x2_t, float16_t,
+ z0 = svld2q_vnum_f16 (p0, x0, 1),
+ z0 = svld2q_vnum (p0, x0, 1))
+
+/*
+** ld2q_vnum_f16_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f16_2, svfloat16x2_t, float16_t,
+ z0 = svld2q_vnum_f16 (p0, x0, 2),
+ z0 = svld2q_vnum (p0, x0, 2))
+
+/*
+** ld2q_vnum_f16_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f16_14, svfloat16x2_t, float16_t,
+ z0 = svld2q_vnum_f16 (p0, x0, 14),
+ z0 = svld2q_vnum (p0, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_f16_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f16_16, svfloat16x2_t, float16_t,
+ z0 = svld2q_vnum_f16 (p0, x0, 16),
+ z0 = svld2q_vnum (p0, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_f16_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f16_m1, svfloat16x2_t, float16_t,
+ z0 = svld2q_vnum_f16 (p0, x0, -1),
+ z0 = svld2q_vnum (p0, x0, -1))
+
+/*
+** ld2q_vnum_f16_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f16_m2, svfloat16x2_t, float16_t,
+ z0 = svld2q_vnum_f16 (p0, x0, -2),
+ z0 = svld2q_vnum (p0, x0, -2))
+
+/*
+** ld2q_vnum_f16_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f16_m16, svfloat16x2_t, float16_t,
+ z0 = svld2q_vnum_f16 (p0, x0, -16),
+ z0 = svld2q_vnum (p0, x0, -16))
+
+/*
+** ld2q_vnum_f16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f16_m18, svfloat16x2_t, float16_t,
+ z0 = svld2q_vnum_f16 (p0, x0, -18),
+ z0 = svld2q_vnum (p0, x0, -18))
+
+/*
+** ld2q_vnum_f16_x1:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f16_x1, svfloat16x2_t, float16_t,
+ z0 = svld2q_vnum_f16 (p0, x0, x1),
+ z0 = svld2q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_f32.c
new file mode 100644
index 0000000..1cd7f6f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_f32.c
@@ -0,0 +1,224 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld2q_f32_base:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_f32_base, svfloat32x2_t, float32_t,
+ z0 = svld2q_f32 (p0, x0),
+ z0 = svld2q (p0, x0))
+
+/*
+** ld2q_f32_index:
+** add (x[0-9]+), x0, x1, lsl #?2
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_f32_index, svfloat32x2_t, float32_t,
+ z0 = svld2q_f32 (p0, x0 + x1),
+ z0 = svld2q (p0, x0 + x1))
+
+/*
+** ld2q_f32_index2:
+** add (x[0-9]+), x0, x1, lsl #?3
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_f32_index2, svfloat32x2_t, float32_t,
+ z0 = svld2q_f32 (p0, x0 + x1 * 2),
+ z0 = svld2q (p0, x0 + x1 * 2))
+
+/*
+** ld2q_f32_index4:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld2q_f32_index4, svfloat32x2_t, float32_t,
+ z0 = svld2q_f32 (p0, x0 + x1 * 4),
+ z0 = svld2q (p0, x0 + x1 * 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_f32_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_f32_1, svfloat32x2_t, float32_t,
+ z0 = svld2q_f32 (p0, x0 + svcntw ()),
+ z0 = svld2q (p0, x0 + svcntw ()))
+
+/*
+** ld2q_f32_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_f32_2, svfloat32x2_t, float32_t,
+ z0 = svld2q_f32 (p0, x0 + svcntw () * 2),
+ z0 = svld2q (p0, x0 + svcntw () * 2))
+
+/*
+** ld2q_f32_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_f32_14, svfloat32x2_t, float32_t,
+ z0 = svld2q_f32 (p0, x0 + svcntw () * 14),
+ z0 = svld2q (p0, x0 + svcntw () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_f32_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_f32_16, svfloat32x2_t, float32_t,
+ z0 = svld2q_f32 (p0, x0 + svcntw () * 16),
+ z0 = svld2q (p0, x0 + svcntw () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_f32_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_f32_m1, svfloat32x2_t, float32_t,
+ z0 = svld2q_f32 (p0, x0 - svcntw ()),
+ z0 = svld2q (p0, x0 - svcntw ()))
+
+/*
+** ld2q_f32_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_f32_m2, svfloat32x2_t, float32_t,
+ z0 = svld2q_f32 (p0, x0 - svcntw () * 2),
+ z0 = svld2q (p0, x0 - svcntw () * 2))
+
+/*
+** ld2q_f32_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_f32_m16, svfloat32x2_t, float32_t,
+ z0 = svld2q_f32 (p0, x0 - svcntw () * 16),
+ z0 = svld2q (p0, x0 - svcntw () * 16))
+
+/*
+** ld2q_f32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_f32_m18, svfloat32x2_t, float32_t,
+ z0 = svld2q_f32 (p0, x0 - svcntw () * 18),
+ z0 = svld2q (p0, x0 - svcntw () * 18))
+
+/*
+** ld2q_vnum_f32_0:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f32_0, svfloat32x2_t, float32_t,
+ z0 = svld2q_vnum_f32 (p0, x0, 0),
+ z0 = svld2q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_f32_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f32_1, svfloat32x2_t, float32_t,
+ z0 = svld2q_vnum_f32 (p0, x0, 1),
+ z0 = svld2q_vnum (p0, x0, 1))
+
+/*
+** ld2q_vnum_f32_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f32_2, svfloat32x2_t, float32_t,
+ z0 = svld2q_vnum_f32 (p0, x0, 2),
+ z0 = svld2q_vnum (p0, x0, 2))
+
+/*
+** ld2q_vnum_f32_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f32_14, svfloat32x2_t, float32_t,
+ z0 = svld2q_vnum_f32 (p0, x0, 14),
+ z0 = svld2q_vnum (p0, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_f32_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f32_16, svfloat32x2_t, float32_t,
+ z0 = svld2q_vnum_f32 (p0, x0, 16),
+ z0 = svld2q_vnum (p0, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_f32_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f32_m1, svfloat32x2_t, float32_t,
+ z0 = svld2q_vnum_f32 (p0, x0, -1),
+ z0 = svld2q_vnum (p0, x0, -1))
+
+/*
+** ld2q_vnum_f32_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f32_m2, svfloat32x2_t, float32_t,
+ z0 = svld2q_vnum_f32 (p0, x0, -2),
+ z0 = svld2q_vnum (p0, x0, -2))
+
+/*
+** ld2q_vnum_f32_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f32_m16, svfloat32x2_t, float32_t,
+ z0 = svld2q_vnum_f32 (p0, x0, -16),
+ z0 = svld2q_vnum (p0, x0, -16))
+
+/*
+** ld2q_vnum_f32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f32_m18, svfloat32x2_t, float32_t,
+ z0 = svld2q_vnum_f32 (p0, x0, -18),
+ z0 = svld2q_vnum (p0, x0, -18))
+
+/*
+** ld2q_vnum_f32_x1:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f32_x1, svfloat32x2_t, float32_t,
+ z0 = svld2q_vnum_f32 (p0, x0, x1),
+ z0 = svld2q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_f64.c
new file mode 100644
index 0000000..2a61f80
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_f64.c
@@ -0,0 +1,214 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld2q_f64_base:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_f64_base, svfloat64x2_t, float64_t,
+ z0 = svld2q_f64 (p0, x0),
+ z0 = svld2q (p0, x0))
+
+/*
+** ld2q_f64_index:
+** add (x[0-9]+), x0, x1, lsl #?3
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_f64_index, svfloat64x2_t, float64_t,
+ z0 = svld2q_f64 (p0, x0 + x1),
+ z0 = svld2q (p0, x0 + x1))
+
+/*
+** ld2q_f64_index2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld2q_f64_index2, svfloat64x2_t, float64_t,
+ z0 = svld2q_f64 (p0, x0 + x1 * 2),
+ z0 = svld2q (p0, x0 + x1 * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_f64_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_f64_1, svfloat64x2_t, float64_t,
+ z0 = svld2q_f64 (p0, x0 + svcntd ()),
+ z0 = svld2q (p0, x0 + svcntd ()))
+
+/*
+** ld2q_f64_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_f64_2, svfloat64x2_t, float64_t,
+ z0 = svld2q_f64 (p0, x0 + svcntd () * 2),
+ z0 = svld2q (p0, x0 + svcntd () * 2))
+
+/*
+** ld2q_f64_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_f64_14, svfloat64x2_t, float64_t,
+ z0 = svld2q_f64 (p0, x0 + svcntd () * 14),
+ z0 = svld2q (p0, x0 + svcntd () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_f64_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_f64_16, svfloat64x2_t, float64_t,
+ z0 = svld2q_f64 (p0, x0 + svcntd () * 16),
+ z0 = svld2q (p0, x0 + svcntd () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_f64_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_f64_m1, svfloat64x2_t, float64_t,
+ z0 = svld2q_f64 (p0, x0 - svcntd ()),
+ z0 = svld2q (p0, x0 - svcntd ()))
+
+/*
+** ld2q_f64_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_f64_m2, svfloat64x2_t, float64_t,
+ z0 = svld2q_f64 (p0, x0 - svcntd () * 2),
+ z0 = svld2q (p0, x0 - svcntd () * 2))
+
+/*
+** ld2q_f64_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_f64_m16, svfloat64x2_t, float64_t,
+ z0 = svld2q_f64 (p0, x0 - svcntd () * 16),
+ z0 = svld2q (p0, x0 - svcntd () * 16))
+
+/*
+** ld2q_f64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_f64_m18, svfloat64x2_t, float64_t,
+ z0 = svld2q_f64 (p0, x0 - svcntd () * 18),
+ z0 = svld2q (p0, x0 - svcntd () * 18))
+
+/*
+** ld2q_vnum_f64_0:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f64_0, svfloat64x2_t, float64_t,
+ z0 = svld2q_vnum_f64 (p0, x0, 0),
+ z0 = svld2q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_f64_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f64_1, svfloat64x2_t, float64_t,
+ z0 = svld2q_vnum_f64 (p0, x0, 1),
+ z0 = svld2q_vnum (p0, x0, 1))
+
+/*
+** ld2q_vnum_f64_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f64_2, svfloat64x2_t, float64_t,
+ z0 = svld2q_vnum_f64 (p0, x0, 2),
+ z0 = svld2q_vnum (p0, x0, 2))
+
+/*
+** ld2q_vnum_f64_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f64_14, svfloat64x2_t, float64_t,
+ z0 = svld2q_vnum_f64 (p0, x0, 14),
+ z0 = svld2q_vnum (p0, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_f64_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f64_16, svfloat64x2_t, float64_t,
+ z0 = svld2q_vnum_f64 (p0, x0, 16),
+ z0 = svld2q_vnum (p0, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_f64_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f64_m1, svfloat64x2_t, float64_t,
+ z0 = svld2q_vnum_f64 (p0, x0, -1),
+ z0 = svld2q_vnum (p0, x0, -1))
+
+/*
+** ld2q_vnum_f64_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f64_m2, svfloat64x2_t, float64_t,
+ z0 = svld2q_vnum_f64 (p0, x0, -2),
+ z0 = svld2q_vnum (p0, x0, -2))
+
+/*
+** ld2q_vnum_f64_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f64_m16, svfloat64x2_t, float64_t,
+ z0 = svld2q_vnum_f64 (p0, x0, -16),
+ z0 = svld2q_vnum (p0, x0, -16))
+
+/*
+** ld2q_vnum_f64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f64_m18, svfloat64x2_t, float64_t,
+ z0 = svld2q_vnum_f64 (p0, x0, -18),
+ z0 = svld2q_vnum (p0, x0, -18))
+
+/*
+** ld2q_vnum_f64_x1:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_f64_x1, svfloat64x2_t, float64_t,
+ z0 = svld2q_vnum_f64 (p0, x0, x1),
+ z0 = svld2q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s16.c
new file mode 100644
index 0000000..b07f469
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s16.c
@@ -0,0 +1,234 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld2q_s16_base:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s16_base, svint16x2_t, int16_t,
+ z0 = svld2q_s16 (p0, x0),
+ z0 = svld2q (p0, x0))
+
+/*
+** ld2q_s16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s16_index, svint16x2_t, int16_t,
+ z0 = svld2q_s16 (p0, x0 + x1),
+ z0 = svld2q (p0, x0 + x1))
+
+/*
+** ld2q_s16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s16_index2, svint16x2_t, int16_t,
+ z0 = svld2q_s16 (p0, x0 + x1 * 2),
+ z0 = svld2q (p0, x0 + x1 * 2))
+
+/*
+** ld2q_s16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s16_index4, svint16x2_t, int16_t,
+ z0 = svld2q_s16 (p0, x0 + x1 * 4),
+ z0 = svld2q (p0, x0 + x1 * 4))
+
+/*
+** ld2q_s16_index8:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld2q_s16_index8, svint16x2_t, int16_t,
+ z0 = svld2q_s16 (p0, x0 + x1 * 8),
+ z0 = svld2q (p0, x0 + x1 * 8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_s16_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s16_1, svint16x2_t, int16_t,
+ z0 = svld2q_s16 (p0, x0 + svcnth ()),
+ z0 = svld2q (p0, x0 + svcnth ()))
+
+/*
+** ld2q_s16_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s16_2, svint16x2_t, int16_t,
+ z0 = svld2q_s16 (p0, x0 + svcnth () * 2),
+ z0 = svld2q (p0, x0 + svcnth () * 2))
+
+/*
+** ld2q_s16_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s16_14, svint16x2_t, int16_t,
+ z0 = svld2q_s16 (p0, x0 + svcnth () * 14),
+ z0 = svld2q (p0, x0 + svcnth () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_s16_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s16_16, svint16x2_t, int16_t,
+ z0 = svld2q_s16 (p0, x0 + svcnth () * 16),
+ z0 = svld2q (p0, x0 + svcnth () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_s16_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s16_m1, svint16x2_t, int16_t,
+ z0 = svld2q_s16 (p0, x0 - svcnth ()),
+ z0 = svld2q (p0, x0 - svcnth ()))
+
+/*
+** ld2q_s16_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s16_m2, svint16x2_t, int16_t,
+ z0 = svld2q_s16 (p0, x0 - svcnth () * 2),
+ z0 = svld2q (p0, x0 - svcnth () * 2))
+
+/*
+** ld2q_s16_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s16_m16, svint16x2_t, int16_t,
+ z0 = svld2q_s16 (p0, x0 - svcnth () * 16),
+ z0 = svld2q (p0, x0 - svcnth () * 16))
+
+/*
+** ld2q_s16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s16_m18, svint16x2_t, int16_t,
+ z0 = svld2q_s16 (p0, x0 - svcnth () * 18),
+ z0 = svld2q (p0, x0 - svcnth () * 18))
+
+/*
+** ld2q_vnum_s16_0:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s16_0, svint16x2_t, int16_t,
+ z0 = svld2q_vnum_s16 (p0, x0, 0),
+ z0 = svld2q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_s16_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s16_1, svint16x2_t, int16_t,
+ z0 = svld2q_vnum_s16 (p0, x0, 1),
+ z0 = svld2q_vnum (p0, x0, 1))
+
+/*
+** ld2q_vnum_s16_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s16_2, svint16x2_t, int16_t,
+ z0 = svld2q_vnum_s16 (p0, x0, 2),
+ z0 = svld2q_vnum (p0, x0, 2))
+
+/*
+** ld2q_vnum_s16_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s16_14, svint16x2_t, int16_t,
+ z0 = svld2q_vnum_s16 (p0, x0, 14),
+ z0 = svld2q_vnum (p0, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_s16_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s16_16, svint16x2_t, int16_t,
+ z0 = svld2q_vnum_s16 (p0, x0, 16),
+ z0 = svld2q_vnum (p0, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_s16_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s16_m1, svint16x2_t, int16_t,
+ z0 = svld2q_vnum_s16 (p0, x0, -1),
+ z0 = svld2q_vnum (p0, x0, -1))
+
+/*
+** ld2q_vnum_s16_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s16_m2, svint16x2_t, int16_t,
+ z0 = svld2q_vnum_s16 (p0, x0, -2),
+ z0 = svld2q_vnum (p0, x0, -2))
+
+/*
+** ld2q_vnum_s16_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s16_m16, svint16x2_t, int16_t,
+ z0 = svld2q_vnum_s16 (p0, x0, -16),
+ z0 = svld2q_vnum (p0, x0, -16))
+
+/*
+** ld2q_vnum_s16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s16_m18, svint16x2_t, int16_t,
+ z0 = svld2q_vnum_s16 (p0, x0, -18),
+ z0 = svld2q_vnum (p0, x0, -18))
+
+/*
+** ld2q_vnum_s16_x1:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s16_x1, svint16x2_t, int16_t,
+ z0 = svld2q_vnum_s16 (p0, x0, x1),
+ z0 = svld2q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s32.c
new file mode 100644
index 0000000..f5de3de
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s32.c
@@ -0,0 +1,224 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld2q_s32_base:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s32_base, svint32x2_t, int32_t,
+ z0 = svld2q_s32 (p0, x0),
+ z0 = svld2q (p0, x0))
+
+/*
+** ld2q_s32_index:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s32_index, svint32x2_t, int32_t,
+ z0 = svld2q_s32 (p0, x0 + x1),
+ z0 = svld2q (p0, x0 + x1))
+
+/*
+** ld2q_s32_index2:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s32_index2, svint32x2_t, int32_t,
+ z0 = svld2q_s32 (p0, x0 + x1 * 2),
+ z0 = svld2q (p0, x0 + x1 * 2))
+
+/*
+** ld2q_s32_index4:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld2q_s32_index4, svint32x2_t, int32_t,
+ z0 = svld2q_s32 (p0, x0 + x1 * 4),
+ z0 = svld2q (p0, x0 + x1 * 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_s32_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s32_1, svint32x2_t, int32_t,
+ z0 = svld2q_s32 (p0, x0 + svcntw ()),
+ z0 = svld2q (p0, x0 + svcntw ()))
+
+/*
+** ld2q_s32_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s32_2, svint32x2_t, int32_t,
+ z0 = svld2q_s32 (p0, x0 + svcntw () * 2),
+ z0 = svld2q (p0, x0 + svcntw () * 2))
+
+/*
+** ld2q_s32_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s32_14, svint32x2_t, int32_t,
+ z0 = svld2q_s32 (p0, x0 + svcntw () * 14),
+ z0 = svld2q (p0, x0 + svcntw () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_s32_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s32_16, svint32x2_t, int32_t,
+ z0 = svld2q_s32 (p0, x0 + svcntw () * 16),
+ z0 = svld2q (p0, x0 + svcntw () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_s32_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s32_m1, svint32x2_t, int32_t,
+ z0 = svld2q_s32 (p0, x0 - svcntw ()),
+ z0 = svld2q (p0, x0 - svcntw ()))
+
+/*
+** ld2q_s32_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s32_m2, svint32x2_t, int32_t,
+ z0 = svld2q_s32 (p0, x0 - svcntw () * 2),
+ z0 = svld2q (p0, x0 - svcntw () * 2))
+
+/*
+** ld2q_s32_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s32_m16, svint32x2_t, int32_t,
+ z0 = svld2q_s32 (p0, x0 - svcntw () * 16),
+ z0 = svld2q (p0, x0 - svcntw () * 16))
+
+/*
+** ld2q_s32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s32_m18, svint32x2_t, int32_t,
+ z0 = svld2q_s32 (p0, x0 - svcntw () * 18),
+ z0 = svld2q (p0, x0 - svcntw () * 18))
+
+/*
+** ld2q_vnum_s32_0:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s32_0, svint32x2_t, int32_t,
+ z0 = svld2q_vnum_s32 (p0, x0, 0),
+ z0 = svld2q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_s32_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s32_1, svint32x2_t, int32_t,
+ z0 = svld2q_vnum_s32 (p0, x0, 1),
+ z0 = svld2q_vnum (p0, x0, 1))
+
+/*
+** ld2q_vnum_s32_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s32_2, svint32x2_t, int32_t,
+ z0 = svld2q_vnum_s32 (p0, x0, 2),
+ z0 = svld2q_vnum (p0, x0, 2))
+
+/*
+** ld2q_vnum_s32_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s32_14, svint32x2_t, int32_t,
+ z0 = svld2q_vnum_s32 (p0, x0, 14),
+ z0 = svld2q_vnum (p0, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_s32_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s32_16, svint32x2_t, int32_t,
+ z0 = svld2q_vnum_s32 (p0, x0, 16),
+ z0 = svld2q_vnum (p0, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_s32_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s32_m1, svint32x2_t, int32_t,
+ z0 = svld2q_vnum_s32 (p0, x0, -1),
+ z0 = svld2q_vnum (p0, x0, -1))
+
+/*
+** ld2q_vnum_s32_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s32_m2, svint32x2_t, int32_t,
+ z0 = svld2q_vnum_s32 (p0, x0, -2),
+ z0 = svld2q_vnum (p0, x0, -2))
+
+/*
+** ld2q_vnum_s32_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s32_m16, svint32x2_t, int32_t,
+ z0 = svld2q_vnum_s32 (p0, x0, -16),
+ z0 = svld2q_vnum (p0, x0, -16))
+
+/*
+** ld2q_vnum_s32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s32_m18, svint32x2_t, int32_t,
+ z0 = svld2q_vnum_s32 (p0, x0, -18),
+ z0 = svld2q_vnum (p0, x0, -18))
+
+/*
+** ld2q_vnum_s32_x1:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s32_x1, svint32x2_t, int32_t,
+ z0 = svld2q_vnum_s32 (p0, x0, x1),
+ z0 = svld2q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s64.c
new file mode 100644
index 0000000..4bd7d94
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s64.c
@@ -0,0 +1,214 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld2q_s64_base:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s64_base, svint64x2_t, int64_t,
+ z0 = svld2q_s64 (p0, x0),
+ z0 = svld2q (p0, x0))
+
+/*
+** ld2q_s64_index:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s64_index, svint64x2_t, int64_t,
+ z0 = svld2q_s64 (p0, x0 + x1),
+ z0 = svld2q (p0, x0 + x1))
+
+/*
+** ld2q_s64_index2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld2q_s64_index2, svint64x2_t, int64_t,
+ z0 = svld2q_s64 (p0, x0 + x1 * 2),
+ z0 = svld2q (p0, x0 + x1 * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_s64_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s64_1, svint64x2_t, int64_t,
+ z0 = svld2q_s64 (p0, x0 + svcntd ()),
+ z0 = svld2q (p0, x0 + svcntd ()))
+
+/*
+** ld2q_s64_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s64_2, svint64x2_t, int64_t,
+ z0 = svld2q_s64 (p0, x0 + svcntd () * 2),
+ z0 = svld2q (p0, x0 + svcntd () * 2))
+
+/*
+** ld2q_s64_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s64_14, svint64x2_t, int64_t,
+ z0 = svld2q_s64 (p0, x0 + svcntd () * 14),
+ z0 = svld2q (p0, x0 + svcntd () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_s64_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s64_16, svint64x2_t, int64_t,
+ z0 = svld2q_s64 (p0, x0 + svcntd () * 16),
+ z0 = svld2q (p0, x0 + svcntd () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_s64_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s64_m1, svint64x2_t, int64_t,
+ z0 = svld2q_s64 (p0, x0 - svcntd ()),
+ z0 = svld2q (p0, x0 - svcntd ()))
+
+/*
+** ld2q_s64_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s64_m2, svint64x2_t, int64_t,
+ z0 = svld2q_s64 (p0, x0 - svcntd () * 2),
+ z0 = svld2q (p0, x0 - svcntd () * 2))
+
+/*
+** ld2q_s64_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s64_m16, svint64x2_t, int64_t,
+ z0 = svld2q_s64 (p0, x0 - svcntd () * 16),
+ z0 = svld2q (p0, x0 - svcntd () * 16))
+
+/*
+** ld2q_s64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s64_m18, svint64x2_t, int64_t,
+ z0 = svld2q_s64 (p0, x0 - svcntd () * 18),
+ z0 = svld2q (p0, x0 - svcntd () * 18))
+
+/*
+** ld2q_vnum_s64_0:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s64_0, svint64x2_t, int64_t,
+ z0 = svld2q_vnum_s64 (p0, x0, 0),
+ z0 = svld2q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_s64_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s64_1, svint64x2_t, int64_t,
+ z0 = svld2q_vnum_s64 (p0, x0, 1),
+ z0 = svld2q_vnum (p0, x0, 1))
+
+/*
+** ld2q_vnum_s64_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s64_2, svint64x2_t, int64_t,
+ z0 = svld2q_vnum_s64 (p0, x0, 2),
+ z0 = svld2q_vnum (p0, x0, 2))
+
+/*
+** ld2q_vnum_s64_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s64_14, svint64x2_t, int64_t,
+ z0 = svld2q_vnum_s64 (p0, x0, 14),
+ z0 = svld2q_vnum (p0, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_s64_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s64_16, svint64x2_t, int64_t,
+ z0 = svld2q_vnum_s64 (p0, x0, 16),
+ z0 = svld2q_vnum (p0, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_s64_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s64_m1, svint64x2_t, int64_t,
+ z0 = svld2q_vnum_s64 (p0, x0, -1),
+ z0 = svld2q_vnum (p0, x0, -1))
+
+/*
+** ld2q_vnum_s64_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s64_m2, svint64x2_t, int64_t,
+ z0 = svld2q_vnum_s64 (p0, x0, -2),
+ z0 = svld2q_vnum (p0, x0, -2))
+
+/*
+** ld2q_vnum_s64_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s64_m16, svint64x2_t, int64_t,
+ z0 = svld2q_vnum_s64 (p0, x0, -16),
+ z0 = svld2q_vnum (p0, x0, -16))
+
+/*
+** ld2q_vnum_s64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s64_m18, svint64x2_t, int64_t,
+ z0 = svld2q_vnum_s64 (p0, x0, -18),
+ z0 = svld2q_vnum (p0, x0, -18))
+
+/*
+** ld2q_vnum_s64_x1:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s64_x1, svint64x2_t, int64_t,
+ z0 = svld2q_vnum_s64 (p0, x0, x1),
+ z0 = svld2q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s8.c
new file mode 100644
index 0000000..5abe82f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_s8.c
@@ -0,0 +1,244 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld2q_s8_base:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_base, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0),
+ z0 = svld2q (p0, x0))
+
+/*
+** ld2q_s8_index:
+** add (x[0-9]), (?:x0, x1|x1, x0)
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_index, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0 + x1),
+ z0 = svld2q (p0, x0 + x1))
+
+/*
+** ld2q_s8_index2:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_index2, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0 + x1 * 2),
+ z0 = svld2q (p0, x0 + x1 * 2))
+
+/*
+** ld2q_s8_index4:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_index4, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0 + x1 * 4),
+ z0 = svld2q (p0, x0 + x1 * 4))
+
+/*
+** ld2q_s8_index8:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_index8, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0 + x1 * 8),
+ z0 = svld2q (p0, x0 + x1 * 8))
+
+/*
+** ld2q_s8_index16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_index16, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0 + x1 * 16),
+ z0 = svld2q (p0, x0 + x1 * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_s8_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_1, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0 + svcntb ()),
+ z0 = svld2q (p0, x0 + svcntb ()))
+
+/*
+** ld2q_s8_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_2, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0 + svcntb () * 2),
+ z0 = svld2q (p0, x0 + svcntb () * 2))
+
+/*
+** ld2q_s8_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_14, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0 + svcntb () * 14),
+ z0 = svld2q (p0, x0 + svcntb () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_s8_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_16, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0 + svcntb () * 16),
+ z0 = svld2q (p0, x0 + svcntb () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_s8_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_m1, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0 - svcntb ()),
+ z0 = svld2q (p0, x0 - svcntb ()))
+
+/*
+** ld2q_s8_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_m2, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0 - svcntb () * 2),
+ z0 = svld2q (p0, x0 - svcntb () * 2))
+
+/*
+** ld2q_s8_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_m16, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0 - svcntb () * 16),
+ z0 = svld2q (p0, x0 - svcntb () * 16))
+
+/*
+** ld2q_s8_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_s8_m18, svint8x2_t, int8_t,
+ z0 = svld2q_s8 (p0, x0 - svcntb () * 18),
+ z0 = svld2q (p0, x0 - svcntb () * 18))
+
+/*
+** ld2q_vnum_s8_0:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s8_0, svint8x2_t, int8_t,
+ z0 = svld2q_vnum_s8 (p0, x0, 0),
+ z0 = svld2q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_s8_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s8_1, svint8x2_t, int8_t,
+ z0 = svld2q_vnum_s8 (p0, x0, 1),
+ z0 = svld2q_vnum (p0, x0, 1))
+
+/*
+** ld2q_vnum_s8_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s8_2, svint8x2_t, int8_t,
+ z0 = svld2q_vnum_s8 (p0, x0, 2),
+ z0 = svld2q_vnum (p0, x0, 2))
+
+/*
+** ld2q_vnum_s8_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s8_14, svint8x2_t, int8_t,
+ z0 = svld2q_vnum_s8 (p0, x0, 14),
+ z0 = svld2q_vnum (p0, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_s8_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s8_16, svint8x2_t, int8_t,
+ z0 = svld2q_vnum_s8 (p0, x0, 16),
+ z0 = svld2q_vnum (p0, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_s8_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s8_m1, svint8x2_t, int8_t,
+ z0 = svld2q_vnum_s8 (p0, x0, -1),
+ z0 = svld2q_vnum (p0, x0, -1))
+
+/*
+** ld2q_vnum_s8_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s8_m2, svint8x2_t, int8_t,
+ z0 = svld2q_vnum_s8 (p0, x0, -2),
+ z0 = svld2q_vnum (p0, x0, -2))
+
+/*
+** ld2q_vnum_s8_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s8_m16, svint8x2_t, int8_t,
+ z0 = svld2q_vnum_s8 (p0, x0, -16),
+ z0 = svld2q_vnum (p0, x0, -16))
+
+/*
+** ld2q_vnum_s8_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s8_m18, svint8x2_t, int8_t,
+ z0 = svld2q_vnum_s8 (p0, x0, -18),
+ z0 = svld2q_vnum (p0, x0, -18))
+
+/*
+** ld2q_vnum_s8_x1:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_s8_x1, svint8x2_t, int8_t,
+ z0 = svld2q_vnum_s8 (p0, x0, x1),
+ z0 = svld2q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u16.c
new file mode 100644
index 0000000..f97b8e2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u16.c
@@ -0,0 +1,234 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld2q_u16_base:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u16_base, svuint16x2_t, uint16_t,
+ z0 = svld2q_u16 (p0, x0),
+ z0 = svld2q (p0, x0))
+
+/*
+** ld2q_u16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u16_index, svuint16x2_t, uint16_t,
+ z0 = svld2q_u16 (p0, x0 + x1),
+ z0 = svld2q (p0, x0 + x1))
+
+/*
+** ld2q_u16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u16_index2, svuint16x2_t, uint16_t,
+ z0 = svld2q_u16 (p0, x0 + x1 * 2),
+ z0 = svld2q (p0, x0 + x1 * 2))
+
+/*
+** ld2q_u16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u16_index4, svuint16x2_t, uint16_t,
+ z0 = svld2q_u16 (p0, x0 + x1 * 4),
+ z0 = svld2q (p0, x0 + x1 * 4))
+
+/*
+** ld2q_u16_index8:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld2q_u16_index8, svuint16x2_t, uint16_t,
+ z0 = svld2q_u16 (p0, x0 + x1 * 8),
+ z0 = svld2q (p0, x0 + x1 * 8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_u16_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u16_1, svuint16x2_t, uint16_t,
+ z0 = svld2q_u16 (p0, x0 + svcnth ()),
+ z0 = svld2q (p0, x0 + svcnth ()))
+
+/*
+** ld2q_u16_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u16_2, svuint16x2_t, uint16_t,
+ z0 = svld2q_u16 (p0, x0 + svcnth () * 2),
+ z0 = svld2q (p0, x0 + svcnth () * 2))
+
+/*
+** ld2q_u16_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u16_14, svuint16x2_t, uint16_t,
+ z0 = svld2q_u16 (p0, x0 + svcnth () * 14),
+ z0 = svld2q (p0, x0 + svcnth () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_u16_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u16_16, svuint16x2_t, uint16_t,
+ z0 = svld2q_u16 (p0, x0 + svcnth () * 16),
+ z0 = svld2q (p0, x0 + svcnth () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_u16_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u16_m1, svuint16x2_t, uint16_t,
+ z0 = svld2q_u16 (p0, x0 - svcnth ()),
+ z0 = svld2q (p0, x0 - svcnth ()))
+
+/*
+** ld2q_u16_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u16_m2, svuint16x2_t, uint16_t,
+ z0 = svld2q_u16 (p0, x0 - svcnth () * 2),
+ z0 = svld2q (p0, x0 - svcnth () * 2))
+
+/*
+** ld2q_u16_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u16_m16, svuint16x2_t, uint16_t,
+ z0 = svld2q_u16 (p0, x0 - svcnth () * 16),
+ z0 = svld2q (p0, x0 - svcnth () * 16))
+
+/*
+** ld2q_u16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u16_m18, svuint16x2_t, uint16_t,
+ z0 = svld2q_u16 (p0, x0 - svcnth () * 18),
+ z0 = svld2q (p0, x0 - svcnth () * 18))
+
+/*
+** ld2q_vnum_u16_0:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u16_0, svuint16x2_t, uint16_t,
+ z0 = svld2q_vnum_u16 (p0, x0, 0),
+ z0 = svld2q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_u16_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u16_1, svuint16x2_t, uint16_t,
+ z0 = svld2q_vnum_u16 (p0, x0, 1),
+ z0 = svld2q_vnum (p0, x0, 1))
+
+/*
+** ld2q_vnum_u16_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u16_2, svuint16x2_t, uint16_t,
+ z0 = svld2q_vnum_u16 (p0, x0, 2),
+ z0 = svld2q_vnum (p0, x0, 2))
+
+/*
+** ld2q_vnum_u16_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u16_14, svuint16x2_t, uint16_t,
+ z0 = svld2q_vnum_u16 (p0, x0, 14),
+ z0 = svld2q_vnum (p0, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_u16_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u16_16, svuint16x2_t, uint16_t,
+ z0 = svld2q_vnum_u16 (p0, x0, 16),
+ z0 = svld2q_vnum (p0, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_u16_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u16_m1, svuint16x2_t, uint16_t,
+ z0 = svld2q_vnum_u16 (p0, x0, -1),
+ z0 = svld2q_vnum (p0, x0, -1))
+
+/*
+** ld2q_vnum_u16_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u16_m2, svuint16x2_t, uint16_t,
+ z0 = svld2q_vnum_u16 (p0, x0, -2),
+ z0 = svld2q_vnum (p0, x0, -2))
+
+/*
+** ld2q_vnum_u16_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u16_m16, svuint16x2_t, uint16_t,
+ z0 = svld2q_vnum_u16 (p0, x0, -16),
+ z0 = svld2q_vnum (p0, x0, -16))
+
+/*
+** ld2q_vnum_u16_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u16_m18, svuint16x2_t, uint16_t,
+ z0 = svld2q_vnum_u16 (p0, x0, -18),
+ z0 = svld2q_vnum (p0, x0, -18))
+
+/*
+** ld2q_vnum_u16_x1:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u16_x1, svuint16x2_t, uint16_t,
+ z0 = svld2q_vnum_u16 (p0, x0, x1),
+ z0 = svld2q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u32.c
new file mode 100644
index 0000000..1e31f8d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u32.c
@@ -0,0 +1,224 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld2q_u32_base:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u32_base, svuint32x2_t, uint32_t,
+ z0 = svld2q_u32 (p0, x0),
+ z0 = svld2q (p0, x0))
+
+/*
+** ld2q_u32_index:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u32_index, svuint32x2_t, uint32_t,
+ z0 = svld2q_u32 (p0, x0 + x1),
+ z0 = svld2q (p0, x0 + x1))
+
+/*
+** ld2q_u32_index2:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u32_index2, svuint32x2_t, uint32_t,
+ z0 = svld2q_u32 (p0, x0 + x1 * 2),
+ z0 = svld2q (p0, x0 + x1 * 2))
+
+/*
+** ld2q_u32_index4:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld2q_u32_index4, svuint32x2_t, uint32_t,
+ z0 = svld2q_u32 (p0, x0 + x1 * 4),
+ z0 = svld2q (p0, x0 + x1 * 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_u32_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u32_1, svuint32x2_t, uint32_t,
+ z0 = svld2q_u32 (p0, x0 + svcntw ()),
+ z0 = svld2q (p0, x0 + svcntw ()))
+
+/*
+** ld2q_u32_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u32_2, svuint32x2_t, uint32_t,
+ z0 = svld2q_u32 (p0, x0 + svcntw () * 2),
+ z0 = svld2q (p0, x0 + svcntw () * 2))
+
+/*
+** ld2q_u32_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u32_14, svuint32x2_t, uint32_t,
+ z0 = svld2q_u32 (p0, x0 + svcntw () * 14),
+ z0 = svld2q (p0, x0 + svcntw () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_u32_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u32_16, svuint32x2_t, uint32_t,
+ z0 = svld2q_u32 (p0, x0 + svcntw () * 16),
+ z0 = svld2q (p0, x0 + svcntw () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_u32_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u32_m1, svuint32x2_t, uint32_t,
+ z0 = svld2q_u32 (p0, x0 - svcntw ()),
+ z0 = svld2q (p0, x0 - svcntw ()))
+
+/*
+** ld2q_u32_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u32_m2, svuint32x2_t, uint32_t,
+ z0 = svld2q_u32 (p0, x0 - svcntw () * 2),
+ z0 = svld2q (p0, x0 - svcntw () * 2))
+
+/*
+** ld2q_u32_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u32_m16, svuint32x2_t, uint32_t,
+ z0 = svld2q_u32 (p0, x0 - svcntw () * 16),
+ z0 = svld2q (p0, x0 - svcntw () * 16))
+
+/*
+** ld2q_u32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u32_m18, svuint32x2_t, uint32_t,
+ z0 = svld2q_u32 (p0, x0 - svcntw () * 18),
+ z0 = svld2q (p0, x0 - svcntw () * 18))
+
+/*
+** ld2q_vnum_u32_0:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u32_0, svuint32x2_t, uint32_t,
+ z0 = svld2q_vnum_u32 (p0, x0, 0),
+ z0 = svld2q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_u32_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u32_1, svuint32x2_t, uint32_t,
+ z0 = svld2q_vnum_u32 (p0, x0, 1),
+ z0 = svld2q_vnum (p0, x0, 1))
+
+/*
+** ld2q_vnum_u32_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u32_2, svuint32x2_t, uint32_t,
+ z0 = svld2q_vnum_u32 (p0, x0, 2),
+ z0 = svld2q_vnum (p0, x0, 2))
+
+/*
+** ld2q_vnum_u32_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u32_14, svuint32x2_t, uint32_t,
+ z0 = svld2q_vnum_u32 (p0, x0, 14),
+ z0 = svld2q_vnum (p0, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_u32_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u32_16, svuint32x2_t, uint32_t,
+ z0 = svld2q_vnum_u32 (p0, x0, 16),
+ z0 = svld2q_vnum (p0, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_u32_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u32_m1, svuint32x2_t, uint32_t,
+ z0 = svld2q_vnum_u32 (p0, x0, -1),
+ z0 = svld2q_vnum (p0, x0, -1))
+
+/*
+** ld2q_vnum_u32_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u32_m2, svuint32x2_t, uint32_t,
+ z0 = svld2q_vnum_u32 (p0, x0, -2),
+ z0 = svld2q_vnum (p0, x0, -2))
+
+/*
+** ld2q_vnum_u32_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u32_m16, svuint32x2_t, uint32_t,
+ z0 = svld2q_vnum_u32 (p0, x0, -16),
+ z0 = svld2q_vnum (p0, x0, -16))
+
+/*
+** ld2q_vnum_u32_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u32_m18, svuint32x2_t, uint32_t,
+ z0 = svld2q_vnum_u32 (p0, x0, -18),
+ z0 = svld2q_vnum (p0, x0, -18))
+
+/*
+** ld2q_vnum_u32_x1:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u32_x1, svuint32x2_t, uint32_t,
+ z0 = svld2q_vnum_u32 (p0, x0, x1),
+ z0 = svld2q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u64.c
new file mode 100644
index 0000000..a9051dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u64.c
@@ -0,0 +1,214 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld2q_u64_base:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u64_base, svuint64x2_t, uint64_t,
+ z0 = svld2q_u64 (p0, x0),
+ z0 = svld2q (p0, x0))
+
+/*
+** ld2q_u64_index:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u64_index, svuint64x2_t, uint64_t,
+ z0 = svld2q_u64 (p0, x0 + x1),
+ z0 = svld2q (p0, x0 + x1))
+
+/*
+** ld2q_u64_index2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld2q_u64_index2, svuint64x2_t, uint64_t,
+ z0 = svld2q_u64 (p0, x0 + x1 * 2),
+ z0 = svld2q (p0, x0 + x1 * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_u64_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u64_1, svuint64x2_t, uint64_t,
+ z0 = svld2q_u64 (p0, x0 + svcntd ()),
+ z0 = svld2q (p0, x0 + svcntd ()))
+
+/*
+** ld2q_u64_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u64_2, svuint64x2_t, uint64_t,
+ z0 = svld2q_u64 (p0, x0 + svcntd () * 2),
+ z0 = svld2q (p0, x0 + svcntd () * 2))
+
+/*
+** ld2q_u64_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u64_14, svuint64x2_t, uint64_t,
+ z0 = svld2q_u64 (p0, x0 + svcntd () * 14),
+ z0 = svld2q (p0, x0 + svcntd () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_u64_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u64_16, svuint64x2_t, uint64_t,
+ z0 = svld2q_u64 (p0, x0 + svcntd () * 16),
+ z0 = svld2q (p0, x0 + svcntd () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_u64_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u64_m1, svuint64x2_t, uint64_t,
+ z0 = svld2q_u64 (p0, x0 - svcntd ()),
+ z0 = svld2q (p0, x0 - svcntd ()))
+
+/*
+** ld2q_u64_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u64_m2, svuint64x2_t, uint64_t,
+ z0 = svld2q_u64 (p0, x0 - svcntd () * 2),
+ z0 = svld2q (p0, x0 - svcntd () * 2))
+
+/*
+** ld2q_u64_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u64_m16, svuint64x2_t, uint64_t,
+ z0 = svld2q_u64 (p0, x0 - svcntd () * 16),
+ z0 = svld2q (p0, x0 - svcntd () * 16))
+
+/*
+** ld2q_u64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u64_m18, svuint64x2_t, uint64_t,
+ z0 = svld2q_u64 (p0, x0 - svcntd () * 18),
+ z0 = svld2q (p0, x0 - svcntd () * 18))
+
+/*
+** ld2q_vnum_u64_0:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u64_0, svuint64x2_t, uint64_t,
+ z0 = svld2q_vnum_u64 (p0, x0, 0),
+ z0 = svld2q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_u64_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u64_1, svuint64x2_t, uint64_t,
+ z0 = svld2q_vnum_u64 (p0, x0, 1),
+ z0 = svld2q_vnum (p0, x0, 1))
+
+/*
+** ld2q_vnum_u64_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u64_2, svuint64x2_t, uint64_t,
+ z0 = svld2q_vnum_u64 (p0, x0, 2),
+ z0 = svld2q_vnum (p0, x0, 2))
+
+/*
+** ld2q_vnum_u64_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u64_14, svuint64x2_t, uint64_t,
+ z0 = svld2q_vnum_u64 (p0, x0, 14),
+ z0 = svld2q_vnum (p0, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_u64_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u64_16, svuint64x2_t, uint64_t,
+ z0 = svld2q_vnum_u64 (p0, x0, 16),
+ z0 = svld2q_vnum (p0, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_u64_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u64_m1, svuint64x2_t, uint64_t,
+ z0 = svld2q_vnum_u64 (p0, x0, -1),
+ z0 = svld2q_vnum (p0, x0, -1))
+
+/*
+** ld2q_vnum_u64_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u64_m2, svuint64x2_t, uint64_t,
+ z0 = svld2q_vnum_u64 (p0, x0, -2),
+ z0 = svld2q_vnum (p0, x0, -2))
+
+/*
+** ld2q_vnum_u64_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u64_m16, svuint64x2_t, uint64_t,
+ z0 = svld2q_vnum_u64 (p0, x0, -16),
+ z0 = svld2q_vnum (p0, x0, -16))
+
+/*
+** ld2q_vnum_u64_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u64_m18, svuint64x2_t, uint64_t,
+ z0 = svld2q_vnum_u64 (p0, x0, -18),
+ z0 = svld2q_vnum (p0, x0, -18))
+
+/*
+** ld2q_vnum_u64_x1:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u64_x1, svuint64x2_t, uint64_t,
+ z0 = svld2q_vnum_u64 (p0, x0, x1),
+ z0 = svld2q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u8.c
new file mode 100644
index 0000000..29df5f3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld2q_u8.c
@@ -0,0 +1,244 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld2q_u8_base:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_base, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0),
+ z0 = svld2q (p0, x0))
+
+/*
+** ld2q_u8_index:
+** add (x[0-9]), (?:x0, x1|x1, x0)
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_index, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0 + x1),
+ z0 = svld2q (p0, x0 + x1))
+
+/*
+** ld2q_u8_index2:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_index2, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0 + x1 * 2),
+ z0 = svld2q (p0, x0 + x1 * 2))
+
+/*
+** ld2q_u8_index4:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_index4, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0 + x1 * 4),
+ z0 = svld2q (p0, x0 + x1 * 4))
+
+/*
+** ld2q_u8_index8:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_index8, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0 + x1 * 8),
+ z0 = svld2q (p0, x0 + x1 * 8))
+
+/*
+** ld2q_u8_index16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_index16, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0 + x1 * 16),
+ z0 = svld2q (p0, x0 + x1 * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_u8_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_1, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0 + svcntb ()),
+ z0 = svld2q (p0, x0 + svcntb ()))
+
+/*
+** ld2q_u8_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_2, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0 + svcntb () * 2),
+ z0 = svld2q (p0, x0 + svcntb () * 2))
+
+/*
+** ld2q_u8_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_14, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0 + svcntb () * 14),
+ z0 = svld2q (p0, x0 + svcntb () * 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_u8_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_16, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0 + svcntb () * 16),
+ z0 = svld2q (p0, x0 + svcntb () * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_u8_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_m1, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0 - svcntb ()),
+ z0 = svld2q (p0, x0 - svcntb ()))
+
+/*
+** ld2q_u8_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_m2, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0 - svcntb () * 2),
+ z0 = svld2q (p0, x0 - svcntb () * 2))
+
+/*
+** ld2q_u8_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_m16, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0 - svcntb () * 16),
+ z0 = svld2q (p0, x0 - svcntb () * 16))
+
+/*
+** ld2q_u8_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_u8_m18, svuint8x2_t, uint8_t,
+ z0 = svld2q_u8 (p0, x0 - svcntb () * 18),
+ z0 = svld2q (p0, x0 - svcntb () * 18))
+
+/*
+** ld2q_vnum_u8_0:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u8_0, svuint8x2_t, uint8_t,
+ z0 = svld2q_vnum_u8 (p0, x0, 0),
+ z0 = svld2q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_u8_1:
+** incb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u8_1, svuint8x2_t, uint8_t,
+ z0 = svld2q_vnum_u8 (p0, x0, 1),
+ z0 = svld2q_vnum (p0, x0, 1))
+
+/*
+** ld2q_vnum_u8_2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u8_2, svuint8x2_t, uint8_t,
+ z0 = svld2q_vnum_u8 (p0, x0, 2),
+ z0 = svld2q_vnum (p0, x0, 2))
+
+/*
+** ld2q_vnum_u8_14:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u8_14, svuint8x2_t, uint8_t,
+ z0 = svld2q_vnum_u8 (p0, x0, 14),
+ z0 = svld2q_vnum (p0, x0, 14))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_u8_16:
+** incb x0, all, mul #16
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u8_16, svuint8x2_t, uint8_t,
+ z0 = svld2q_vnum_u8 (p0, x0, 16),
+ z0 = svld2q_vnum (p0, x0, 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld2q_vnum_u8_m1:
+** decb x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u8_m1, svuint8x2_t, uint8_t,
+ z0 = svld2q_vnum_u8 (p0, x0, -1),
+ z0 = svld2q_vnum (p0, x0, -1))
+
+/*
+** ld2q_vnum_u8_m2:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u8_m2, svuint8x2_t, uint8_t,
+ z0 = svld2q_vnum_u8 (p0, x0, -2),
+ z0 = svld2q_vnum (p0, x0, -2))
+
+/*
+** ld2q_vnum_u8_m16:
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u8_m16, svuint8x2_t, uint8_t,
+ z0 = svld2q_vnum_u8 (p0, x0, -16),
+ z0 = svld2q_vnum (p0, x0, -16))
+
+/*
+** ld2q_vnum_u8_m18:
+** addvl (x[0-9]+), x0, #-18
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u8_m18, svuint8x2_t, uint8_t,
+ z0 = svld2q_vnum_u8 (p0, x0, -18),
+ z0 = svld2q_vnum (p0, x0, -18))
+
+/*
+** ld2q_vnum_u8_x1:
+** cntb (x[0-9]+)
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld2q {z0\.q(?: - |, )z1\.q}, p0/z, \[\2\]
+** ret
+*/
+TEST_LOAD (ld2q_vnum_u8_x1, svuint8x2_t, uint8_t,
+ z0 = svld2q_vnum_u8 (p0, x0, x1),
+ z0 = svld2q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_bf16.c
new file mode 100644
index 0000000..ec4f17c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_bf16.c
@@ -0,0 +1,281 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld3q_bf16_base:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_base, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0),
+ z0 = svld3q (p0, x0))
+
+/*
+** ld3q_bf16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_index, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 + x1),
+ z0 = svld3q (p0, x0 + x1))
+
+/*
+** ld3q_bf16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_index2, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 + x1 * 2),
+ z0 = svld3q (p0, x0 + x1 * 2))
+
+/*
+** ld3q_bf16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_index4, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 + x1 * 4),
+ z0 = svld3q (p0, x0 + x1 * 4))
+
+/*
+** ld3q_bf16_index8:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_index8, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 + x1 * 8),
+ z0 = svld3q (p0, x0 + x1 * 8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_bf16_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_1, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 + svcnth ()),
+ z0 = svld3q (p0, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_bf16_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_2, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 + svcnth () * 2),
+ z0 = svld3q (p0, x0 + svcnth () * 2))
+
+/*
+** ld3q_bf16_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_3, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 + svcnth () * 3),
+ z0 = svld3q (p0, x0 + svcnth () * 3))
+
+/*
+** ld3q_bf16_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_21, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 + svcnth () * 21),
+ z0 = svld3q (p0, x0 + svcnth () * 21))
+
+/*
+** ld3q_bf16_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_24, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 + svcnth () * 24),
+ z0 = svld3q (p0, x0 + svcnth () * 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_bf16_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_m1, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 - svcnth ()),
+ z0 = svld3q (p0, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_bf16_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_m2, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 - svcnth () * 2),
+ z0 = svld3q (p0, x0 - svcnth () * 2))
+
+/*
+** ld3q_bf16_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_m3, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 - svcnth () * 3),
+ z0 = svld3q (p0, x0 - svcnth () * 3))
+
+/*
+** ld3q_bf16_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_m24, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 - svcnth () * 24),
+ z0 = svld3q (p0, x0 - svcnth () * 24))
+
+/*
+** ld3q_bf16_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_bf16_m27, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_bf16 (p0, x0 - svcnth () * 27),
+ z0 = svld3q (p0, x0 - svcnth () * 27))
+
+/*
+** ld3q_vnum_bf16_0:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_bf16_0, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_vnum_bf16 (p0, x0, 0),
+ z0 = svld3q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_bf16_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_bf16_1, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_vnum_bf16 (p0, x0, 1),
+ z0 = svld3q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_bf16_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_bf16_2, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_vnum_bf16 (p0, x0, 2),
+ z0 = svld3q_vnum (p0, x0, 2))
+
+/*
+** ld3q_vnum_bf16_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_bf16_3, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_vnum_bf16 (p0, x0, 3),
+ z0 = svld3q_vnum (p0, x0, 3))
+
+/*
+** ld3q_vnum_bf16_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_bf16_21, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_vnum_bf16 (p0, x0, 21),
+ z0 = svld3q_vnum (p0, x0, 21))
+
+/*
+** ld3q_vnum_bf16_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_bf16_24, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_vnum_bf16 (p0, x0, 24),
+ z0 = svld3q_vnum (p0, x0, 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_bf16_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_bf16_m1, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_vnum_bf16 (p0, x0, -1),
+ z0 = svld3q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_bf16_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_bf16_m2, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_vnum_bf16 (p0, x0, -2),
+ z0 = svld3q_vnum (p0, x0, -2))
+
+/*
+** ld3q_vnum_bf16_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_bf16_m3, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_vnum_bf16 (p0, x0, -3),
+ z0 = svld3q_vnum (p0, x0, -3))
+
+/*
+** ld3q_vnum_bf16_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_bf16_m24, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_vnum_bf16 (p0, x0, -24),
+ z0 = svld3q_vnum (p0, x0, -24))
+
+/*
+** ld3q_vnum_bf16_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_bf16_m27, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_vnum_bf16 (p0, x0, -27),
+ z0 = svld3q_vnum (p0, x0, -27))
+
+/*
+** ld3q_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld3q_vnum_bf16_x1, svbfloat16x3_t, bfloat16_t,
+ z0 = svld3q_vnum_bf16 (p0, x0, x1),
+ z0 = svld3q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_f16.c
new file mode 100644
index 0000000..57be344
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_f16.c
@@ -0,0 +1,281 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld3q_f16_base:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_base, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0),
+ z0 = svld3q (p0, x0))
+
+/*
+** ld3q_f16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_index, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 + x1),
+ z0 = svld3q (p0, x0 + x1))
+
+/*
+** ld3q_f16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_index2, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 + x1 * 2),
+ z0 = svld3q (p0, x0 + x1 * 2))
+
+/*
+** ld3q_f16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_index4, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 + x1 * 4),
+ z0 = svld3q (p0, x0 + x1 * 4))
+
+/*
+** ld3q_f16_index8:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_index8, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 + x1 * 8),
+ z0 = svld3q (p0, x0 + x1 * 8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_f16_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_1, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 + svcnth ()),
+ z0 = svld3q (p0, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_f16_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_2, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 + svcnth () * 2),
+ z0 = svld3q (p0, x0 + svcnth () * 2))
+
+/*
+** ld3q_f16_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_3, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 + svcnth () * 3),
+ z0 = svld3q (p0, x0 + svcnth () * 3))
+
+/*
+** ld3q_f16_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_21, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 + svcnth () * 21),
+ z0 = svld3q (p0, x0 + svcnth () * 21))
+
+/*
+** ld3q_f16_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_24, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 + svcnth () * 24),
+ z0 = svld3q (p0, x0 + svcnth () * 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_f16_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_m1, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 - svcnth ()),
+ z0 = svld3q (p0, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_f16_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_m2, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 - svcnth () * 2),
+ z0 = svld3q (p0, x0 - svcnth () * 2))
+
+/*
+** ld3q_f16_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_m3, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 - svcnth () * 3),
+ z0 = svld3q (p0, x0 - svcnth () * 3))
+
+/*
+** ld3q_f16_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_m24, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 - svcnth () * 24),
+ z0 = svld3q (p0, x0 - svcnth () * 24))
+
+/*
+** ld3q_f16_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_f16_m27, svfloat16x3_t, float16_t,
+ z0 = svld3q_f16 (p0, x0 - svcnth () * 27),
+ z0 = svld3q (p0, x0 - svcnth () * 27))
+
+/*
+** ld3q_vnum_f16_0:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f16_0, svfloat16x3_t, float16_t,
+ z0 = svld3q_vnum_f16 (p0, x0, 0),
+ z0 = svld3q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_f16_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f16_1, svfloat16x3_t, float16_t,
+ z0 = svld3q_vnum_f16 (p0, x0, 1),
+ z0 = svld3q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_f16_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f16_2, svfloat16x3_t, float16_t,
+ z0 = svld3q_vnum_f16 (p0, x0, 2),
+ z0 = svld3q_vnum (p0, x0, 2))
+
+/*
+** ld3q_vnum_f16_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f16_3, svfloat16x3_t, float16_t,
+ z0 = svld3q_vnum_f16 (p0, x0, 3),
+ z0 = svld3q_vnum (p0, x0, 3))
+
+/*
+** ld3q_vnum_f16_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f16_21, svfloat16x3_t, float16_t,
+ z0 = svld3q_vnum_f16 (p0, x0, 21),
+ z0 = svld3q_vnum (p0, x0, 21))
+
+/*
+** ld3q_vnum_f16_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f16_24, svfloat16x3_t, float16_t,
+ z0 = svld3q_vnum_f16 (p0, x0, 24),
+ z0 = svld3q_vnum (p0, x0, 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_f16_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f16_m1, svfloat16x3_t, float16_t,
+ z0 = svld3q_vnum_f16 (p0, x0, -1),
+ z0 = svld3q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_f16_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f16_m2, svfloat16x3_t, float16_t,
+ z0 = svld3q_vnum_f16 (p0, x0, -2),
+ z0 = svld3q_vnum (p0, x0, -2))
+
+/*
+** ld3q_vnum_f16_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f16_m3, svfloat16x3_t, float16_t,
+ z0 = svld3q_vnum_f16 (p0, x0, -3),
+ z0 = svld3q_vnum (p0, x0, -3))
+
+/*
+** ld3q_vnum_f16_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f16_m24, svfloat16x3_t, float16_t,
+ z0 = svld3q_vnum_f16 (p0, x0, -24),
+ z0 = svld3q_vnum (p0, x0, -24))
+
+/*
+** ld3q_vnum_f16_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f16_m27, svfloat16x3_t, float16_t,
+ z0 = svld3q_vnum_f16 (p0, x0, -27),
+ z0 = svld3q_vnum (p0, x0, -27))
+
+/*
+** ld3q_vnum_f16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f16_x1, svfloat16x3_t, float16_t,
+ z0 = svld3q_vnum_f16 (p0, x0, x1),
+ z0 = svld3q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_f32.c
new file mode 100644
index 0000000..2304f89
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_f32.c
@@ -0,0 +1,271 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld3q_f32_base:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_base, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0),
+ z0 = svld3q (p0, x0))
+
+/*
+** ld3q_f32_index:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_index, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0 + x1),
+ z0 = svld3q (p0, x0 + x1))
+
+/*
+** ld3q_f32_index2:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_index2, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0 + x1 * 2),
+ z0 = svld3q (p0, x0 + x1 * 2))
+
+/*
+** ld3q_f32_index4:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_index4, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0 + x1 * 4),
+ z0 = svld3q (p0, x0 + x1 * 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_f32_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_1, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0 + svcntw ()),
+ z0 = svld3q (p0, x0 + svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_f32_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_2, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0 + svcntw () * 2),
+ z0 = svld3q (p0, x0 + svcntw () * 2))
+
+/*
+** ld3q_f32_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_3, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0 + svcntw () * 3),
+ z0 = svld3q (p0, x0 + svcntw () * 3))
+
+/*
+** ld3q_f32_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_21, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0 + svcntw () * 21),
+ z0 = svld3q (p0, x0 + svcntw () * 21))
+
+/*
+** ld3q_f32_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_24, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0 + svcntw () * 24),
+ z0 = svld3q (p0, x0 + svcntw () * 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_f32_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_m1, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0 - svcntw ()),
+ z0 = svld3q (p0, x0 - svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_f32_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_m2, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0 - svcntw () * 2),
+ z0 = svld3q (p0, x0 - svcntw () * 2))
+
+/*
+** ld3q_f32_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_m3, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0 - svcntw () * 3),
+ z0 = svld3q (p0, x0 - svcntw () * 3))
+
+/*
+** ld3q_f32_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_m24, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0 - svcntw () * 24),
+ z0 = svld3q (p0, x0 - svcntw () * 24))
+
+/*
+** ld3q_f32_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_f32_m27, svfloat32x3_t, float32_t,
+ z0 = svld3q_f32 (p0, x0 - svcntw () * 27),
+ z0 = svld3q (p0, x0 - svcntw () * 27))
+
+/*
+** ld3q_vnum_f32_0:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f32_0, svfloat32x3_t, float32_t,
+ z0 = svld3q_vnum_f32 (p0, x0, 0),
+ z0 = svld3q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_f32_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f32_1, svfloat32x3_t, float32_t,
+ z0 = svld3q_vnum_f32 (p0, x0, 1),
+ z0 = svld3q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_f32_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f32_2, svfloat32x3_t, float32_t,
+ z0 = svld3q_vnum_f32 (p0, x0, 2),
+ z0 = svld3q_vnum (p0, x0, 2))
+
+/*
+** ld3q_vnum_f32_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f32_3, svfloat32x3_t, float32_t,
+ z0 = svld3q_vnum_f32 (p0, x0, 3),
+ z0 = svld3q_vnum (p0, x0, 3))
+
+/*
+** ld3q_vnum_f32_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f32_21, svfloat32x3_t, float32_t,
+ z0 = svld3q_vnum_f32 (p0, x0, 21),
+ z0 = svld3q_vnum (p0, x0, 21))
+
+/*
+** ld3q_vnum_f32_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f32_24, svfloat32x3_t, float32_t,
+ z0 = svld3q_vnum_f32 (p0, x0, 24),
+ z0 = svld3q_vnum (p0, x0, 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_f32_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f32_m1, svfloat32x3_t, float32_t,
+ z0 = svld3q_vnum_f32 (p0, x0, -1),
+ z0 = svld3q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_f32_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f32_m2, svfloat32x3_t, float32_t,
+ z0 = svld3q_vnum_f32 (p0, x0, -2),
+ z0 = svld3q_vnum (p0, x0, -2))
+
+/*
+** ld3q_vnum_f32_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f32_m3, svfloat32x3_t, float32_t,
+ z0 = svld3q_vnum_f32 (p0, x0, -3),
+ z0 = svld3q_vnum (p0, x0, -3))
+
+/*
+** ld3q_vnum_f32_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f32_m24, svfloat32x3_t, float32_t,
+ z0 = svld3q_vnum_f32 (p0, x0, -24),
+ z0 = svld3q_vnum (p0, x0, -24))
+
+/*
+** ld3q_vnum_f32_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f32_m27, svfloat32x3_t, float32_t,
+ z0 = svld3q_vnum_f32 (p0, x0, -27),
+ z0 = svld3q_vnum (p0, x0, -27))
+
+/*
+** ld3q_vnum_f32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f32_x1, svfloat32x3_t, float32_t,
+ z0 = svld3q_vnum_f32 (p0, x0, x1),
+ z0 = svld3q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_f64.c
new file mode 100644
index 0000000..6650526
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_f64.c
@@ -0,0 +1,261 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld3q_f64_base:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f64_base, svfloat64x3_t, float64_t,
+ z0 = svld3q_f64 (p0, x0),
+ z0 = svld3q (p0, x0))
+
+/*
+** ld3q_f64_index:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_f64_index, svfloat64x3_t, float64_t,
+ z0 = svld3q_f64 (p0, x0 + x1),
+ z0 = svld3q (p0, x0 + x1))
+
+/*
+** ld3q_f64_index2:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld3q_f64_index2, svfloat64x3_t, float64_t,
+ z0 = svld3q_f64 (p0, x0 + x1 * 2),
+ z0 = svld3q (p0, x0 + x1 * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_f64_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f64_1, svfloat64x3_t, float64_t,
+ z0 = svld3q_f64 (p0, x0 + svcntd ()),
+ z0 = svld3q (p0, x0 + svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_f64_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f64_2, svfloat64x3_t, float64_t,
+ z0 = svld3q_f64 (p0, x0 + svcntd () * 2),
+ z0 = svld3q (p0, x0 + svcntd () * 2))
+
+/*
+** ld3q_f64_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_f64_3, svfloat64x3_t, float64_t,
+ z0 = svld3q_f64 (p0, x0 + svcntd () * 3),
+ z0 = svld3q (p0, x0 + svcntd () * 3))
+
+/*
+** ld3q_f64_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_f64_21, svfloat64x3_t, float64_t,
+ z0 = svld3q_f64 (p0, x0 + svcntd () * 21),
+ z0 = svld3q (p0, x0 + svcntd () * 21))
+
+/*
+** ld3q_f64_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_f64_24, svfloat64x3_t, float64_t,
+ z0 = svld3q_f64 (p0, x0 + svcntd () * 24),
+ z0 = svld3q (p0, x0 + svcntd () * 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_f64_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f64_m1, svfloat64x3_t, float64_t,
+ z0 = svld3q_f64 (p0, x0 - svcntd ()),
+ z0 = svld3q (p0, x0 - svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_f64_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_f64_m2, svfloat64x3_t, float64_t,
+ z0 = svld3q_f64 (p0, x0 - svcntd () * 2),
+ z0 = svld3q (p0, x0 - svcntd () * 2))
+
+/*
+** ld3q_f64_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_f64_m3, svfloat64x3_t, float64_t,
+ z0 = svld3q_f64 (p0, x0 - svcntd () * 3),
+ z0 = svld3q (p0, x0 - svcntd () * 3))
+
+/*
+** ld3q_f64_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_f64_m24, svfloat64x3_t, float64_t,
+ z0 = svld3q_f64 (p0, x0 - svcntd () * 24),
+ z0 = svld3q (p0, x0 - svcntd () * 24))
+
+/*
+** ld3q_f64_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_f64_m27, svfloat64x3_t, float64_t,
+ z0 = svld3q_f64 (p0, x0 - svcntd () * 27),
+ z0 = svld3q (p0, x0 - svcntd () * 27))
+
+/*
+** ld3q_vnum_f64_0:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f64_0, svfloat64x3_t, float64_t,
+ z0 = svld3q_vnum_f64 (p0, x0, 0),
+ z0 = svld3q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_f64_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f64_1, svfloat64x3_t, float64_t,
+ z0 = svld3q_vnum_f64 (p0, x0, 1),
+ z0 = svld3q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_f64_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f64_2, svfloat64x3_t, float64_t,
+ z0 = svld3q_vnum_f64 (p0, x0, 2),
+ z0 = svld3q_vnum (p0, x0, 2))
+
+/*
+** ld3q_vnum_f64_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f64_3, svfloat64x3_t, float64_t,
+ z0 = svld3q_vnum_f64 (p0, x0, 3),
+ z0 = svld3q_vnum (p0, x0, 3))
+
+/*
+** ld3q_vnum_f64_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f64_21, svfloat64x3_t, float64_t,
+ z0 = svld3q_vnum_f64 (p0, x0, 21),
+ z0 = svld3q_vnum (p0, x0, 21))
+
+/*
+** ld3q_vnum_f64_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f64_24, svfloat64x3_t, float64_t,
+ z0 = svld3q_vnum_f64 (p0, x0, 24),
+ z0 = svld3q_vnum (p0, x0, 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_f64_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f64_m1, svfloat64x3_t, float64_t,
+ z0 = svld3q_vnum_f64 (p0, x0, -1),
+ z0 = svld3q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_f64_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f64_m2, svfloat64x3_t, float64_t,
+ z0 = svld3q_vnum_f64 (p0, x0, -2),
+ z0 = svld3q_vnum (p0, x0, -2))
+
+/*
+** ld3q_vnum_f64_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f64_m3, svfloat64x3_t, float64_t,
+ z0 = svld3q_vnum_f64 (p0, x0, -3),
+ z0 = svld3q_vnum (p0, x0, -3))
+
+/*
+** ld3q_vnum_f64_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f64_m24, svfloat64x3_t, float64_t,
+ z0 = svld3q_vnum_f64 (p0, x0, -24),
+ z0 = svld3q_vnum (p0, x0, -24))
+
+/*
+** ld3q_vnum_f64_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f64_m27, svfloat64x3_t, float64_t,
+ z0 = svld3q_vnum_f64 (p0, x0, -27),
+ z0 = svld3q_vnum (p0, x0, -27))
+
+/*
+** ld3q_vnum_f64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld3q_vnum_f64_x1, svfloat64x3_t, float64_t,
+ z0 = svld3q_vnum_f64 (p0, x0, x1),
+ z0 = svld3q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s16.c
new file mode 100644
index 0000000..bd710b7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s16.c
@@ -0,0 +1,281 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld3q_s16_base:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_base, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0),
+ z0 = svld3q (p0, x0))
+
+/*
+** ld3q_s16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_index, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 + x1),
+ z0 = svld3q (p0, x0 + x1))
+
+/*
+** ld3q_s16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_index2, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 + x1 * 2),
+ z0 = svld3q (p0, x0 + x1 * 2))
+
+/*
+** ld3q_s16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_index4, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 + x1 * 4),
+ z0 = svld3q (p0, x0 + x1 * 4))
+
+/*
+** ld3q_s16_index8:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_index8, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 + x1 * 8),
+ z0 = svld3q (p0, x0 + x1 * 8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s16_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_1, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 + svcnth ()),
+ z0 = svld3q (p0, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s16_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_2, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 + svcnth () * 2),
+ z0 = svld3q (p0, x0 + svcnth () * 2))
+
+/*
+** ld3q_s16_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_3, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 + svcnth () * 3),
+ z0 = svld3q (p0, x0 + svcnth () * 3))
+
+/*
+** ld3q_s16_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_21, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 + svcnth () * 21),
+ z0 = svld3q (p0, x0 + svcnth () * 21))
+
+/*
+** ld3q_s16_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_24, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 + svcnth () * 24),
+ z0 = svld3q (p0, x0 + svcnth () * 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s16_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_m1, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 - svcnth ()),
+ z0 = svld3q (p0, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s16_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_m2, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 - svcnth () * 2),
+ z0 = svld3q (p0, x0 - svcnth () * 2))
+
+/*
+** ld3q_s16_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_m3, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 - svcnth () * 3),
+ z0 = svld3q (p0, x0 - svcnth () * 3))
+
+/*
+** ld3q_s16_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_m24, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 - svcnth () * 24),
+ z0 = svld3q (p0, x0 - svcnth () * 24))
+
+/*
+** ld3q_s16_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s16_m27, svint16x3_t, int16_t,
+ z0 = svld3q_s16 (p0, x0 - svcnth () * 27),
+ z0 = svld3q (p0, x0 - svcnth () * 27))
+
+/*
+** ld3q_vnum_s16_0:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s16_0, svint16x3_t, int16_t,
+ z0 = svld3q_vnum_s16 (p0, x0, 0),
+ z0 = svld3q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s16_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s16_1, svint16x3_t, int16_t,
+ z0 = svld3q_vnum_s16 (p0, x0, 1),
+ z0 = svld3q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s16_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s16_2, svint16x3_t, int16_t,
+ z0 = svld3q_vnum_s16 (p0, x0, 2),
+ z0 = svld3q_vnum (p0, x0, 2))
+
+/*
+** ld3q_vnum_s16_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s16_3, svint16x3_t, int16_t,
+ z0 = svld3q_vnum_s16 (p0, x0, 3),
+ z0 = svld3q_vnum (p0, x0, 3))
+
+/*
+** ld3q_vnum_s16_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s16_21, svint16x3_t, int16_t,
+ z0 = svld3q_vnum_s16 (p0, x0, 21),
+ z0 = svld3q_vnum (p0, x0, 21))
+
+/*
+** ld3q_vnum_s16_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s16_24, svint16x3_t, int16_t,
+ z0 = svld3q_vnum_s16 (p0, x0, 24),
+ z0 = svld3q_vnum (p0, x0, 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s16_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s16_m1, svint16x3_t, int16_t,
+ z0 = svld3q_vnum_s16 (p0, x0, -1),
+ z0 = svld3q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s16_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s16_m2, svint16x3_t, int16_t,
+ z0 = svld3q_vnum_s16 (p0, x0, -2),
+ z0 = svld3q_vnum (p0, x0, -2))
+
+/*
+** ld3q_vnum_s16_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s16_m3, svint16x3_t, int16_t,
+ z0 = svld3q_vnum_s16 (p0, x0, -3),
+ z0 = svld3q_vnum (p0, x0, -3))
+
+/*
+** ld3q_vnum_s16_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s16_m24, svint16x3_t, int16_t,
+ z0 = svld3q_vnum_s16 (p0, x0, -24),
+ z0 = svld3q_vnum (p0, x0, -24))
+
+/*
+** ld3q_vnum_s16_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s16_m27, svint16x3_t, int16_t,
+ z0 = svld3q_vnum_s16 (p0, x0, -27),
+ z0 = svld3q_vnum (p0, x0, -27))
+
+/*
+** ld3q_vnum_s16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s16_x1, svint16x3_t, int16_t,
+ z0 = svld3q_vnum_s16 (p0, x0, x1),
+ z0 = svld3q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s32.c
new file mode 100644
index 0000000..a78e0c6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s32.c
@@ -0,0 +1,271 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld3q_s32_base:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_base, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0),
+ z0 = svld3q (p0, x0))
+
+/*
+** ld3q_s32_index:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_index, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0 + x1),
+ z0 = svld3q (p0, x0 + x1))
+
+/*
+** ld3q_s32_index2:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_index2, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0 + x1 * 2),
+ z0 = svld3q (p0, x0 + x1 * 2))
+
+/*
+** ld3q_s32_index4:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_index4, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0 + x1 * 4),
+ z0 = svld3q (p0, x0 + x1 * 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s32_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_1, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0 + svcntw ()),
+ z0 = svld3q (p0, x0 + svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s32_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_2, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0 + svcntw () * 2),
+ z0 = svld3q (p0, x0 + svcntw () * 2))
+
+/*
+** ld3q_s32_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_3, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0 + svcntw () * 3),
+ z0 = svld3q (p0, x0 + svcntw () * 3))
+
+/*
+** ld3q_s32_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_21, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0 + svcntw () * 21),
+ z0 = svld3q (p0, x0 + svcntw () * 21))
+
+/*
+** ld3q_s32_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_24, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0 + svcntw () * 24),
+ z0 = svld3q (p0, x0 + svcntw () * 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s32_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_m1, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0 - svcntw ()),
+ z0 = svld3q (p0, x0 - svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s32_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_m2, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0 - svcntw () * 2),
+ z0 = svld3q (p0, x0 - svcntw () * 2))
+
+/*
+** ld3q_s32_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_m3, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0 - svcntw () * 3),
+ z0 = svld3q (p0, x0 - svcntw () * 3))
+
+/*
+** ld3q_s32_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_m24, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0 - svcntw () * 24),
+ z0 = svld3q (p0, x0 - svcntw () * 24))
+
+/*
+** ld3q_s32_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s32_m27, svint32x3_t, int32_t,
+ z0 = svld3q_s32 (p0, x0 - svcntw () * 27),
+ z0 = svld3q (p0, x0 - svcntw () * 27))
+
+/*
+** ld3q_vnum_s32_0:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s32_0, svint32x3_t, int32_t,
+ z0 = svld3q_vnum_s32 (p0, x0, 0),
+ z0 = svld3q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s32_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s32_1, svint32x3_t, int32_t,
+ z0 = svld3q_vnum_s32 (p0, x0, 1),
+ z0 = svld3q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s32_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s32_2, svint32x3_t, int32_t,
+ z0 = svld3q_vnum_s32 (p0, x0, 2),
+ z0 = svld3q_vnum (p0, x0, 2))
+
+/*
+** ld3q_vnum_s32_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s32_3, svint32x3_t, int32_t,
+ z0 = svld3q_vnum_s32 (p0, x0, 3),
+ z0 = svld3q_vnum (p0, x0, 3))
+
+/*
+** ld3q_vnum_s32_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s32_21, svint32x3_t, int32_t,
+ z0 = svld3q_vnum_s32 (p0, x0, 21),
+ z0 = svld3q_vnum (p0, x0, 21))
+
+/*
+** ld3q_vnum_s32_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s32_24, svint32x3_t, int32_t,
+ z0 = svld3q_vnum_s32 (p0, x0, 24),
+ z0 = svld3q_vnum (p0, x0, 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s32_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s32_m1, svint32x3_t, int32_t,
+ z0 = svld3q_vnum_s32 (p0, x0, -1),
+ z0 = svld3q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s32_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s32_m2, svint32x3_t, int32_t,
+ z0 = svld3q_vnum_s32 (p0, x0, -2),
+ z0 = svld3q_vnum (p0, x0, -2))
+
+/*
+** ld3q_vnum_s32_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s32_m3, svint32x3_t, int32_t,
+ z0 = svld3q_vnum_s32 (p0, x0, -3),
+ z0 = svld3q_vnum (p0, x0, -3))
+
+/*
+** ld3q_vnum_s32_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s32_m24, svint32x3_t, int32_t,
+ z0 = svld3q_vnum_s32 (p0, x0, -24),
+ z0 = svld3q_vnum (p0, x0, -24))
+
+/*
+** ld3q_vnum_s32_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s32_m27, svint32x3_t, int32_t,
+ z0 = svld3q_vnum_s32 (p0, x0, -27),
+ z0 = svld3q_vnum (p0, x0, -27))
+
+/*
+** ld3q_vnum_s32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s32_x1, svint32x3_t, int32_t,
+ z0 = svld3q_vnum_s32 (p0, x0, x1),
+ z0 = svld3q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s64.c
new file mode 100644
index 0000000..b2625cf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s64.c
@@ -0,0 +1,261 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld3q_s64_base:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s64_base, svint64x3_t, int64_t,
+ z0 = svld3q_s64 (p0, x0),
+ z0 = svld3q (p0, x0))
+
+/*
+** ld3q_s64_index:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s64_index, svint64x3_t, int64_t,
+ z0 = svld3q_s64 (p0, x0 + x1),
+ z0 = svld3q (p0, x0 + x1))
+
+/*
+** ld3q_s64_index2:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld3q_s64_index2, svint64x3_t, int64_t,
+ z0 = svld3q_s64 (p0, x0 + x1 * 2),
+ z0 = svld3q (p0, x0 + x1 * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s64_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s64_1, svint64x3_t, int64_t,
+ z0 = svld3q_s64 (p0, x0 + svcntd ()),
+ z0 = svld3q (p0, x0 + svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s64_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s64_2, svint64x3_t, int64_t,
+ z0 = svld3q_s64 (p0, x0 + svcntd () * 2),
+ z0 = svld3q (p0, x0 + svcntd () * 2))
+
+/*
+** ld3q_s64_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s64_3, svint64x3_t, int64_t,
+ z0 = svld3q_s64 (p0, x0 + svcntd () * 3),
+ z0 = svld3q (p0, x0 + svcntd () * 3))
+
+/*
+** ld3q_s64_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s64_21, svint64x3_t, int64_t,
+ z0 = svld3q_s64 (p0, x0 + svcntd () * 21),
+ z0 = svld3q (p0, x0 + svcntd () * 21))
+
+/*
+** ld3q_s64_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s64_24, svint64x3_t, int64_t,
+ z0 = svld3q_s64 (p0, x0 + svcntd () * 24),
+ z0 = svld3q (p0, x0 + svcntd () * 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s64_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s64_m1, svint64x3_t, int64_t,
+ z0 = svld3q_s64 (p0, x0 - svcntd ()),
+ z0 = svld3q (p0, x0 - svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s64_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s64_m2, svint64x3_t, int64_t,
+ z0 = svld3q_s64 (p0, x0 - svcntd () * 2),
+ z0 = svld3q (p0, x0 - svcntd () * 2))
+
+/*
+** ld3q_s64_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s64_m3, svint64x3_t, int64_t,
+ z0 = svld3q_s64 (p0, x0 - svcntd () * 3),
+ z0 = svld3q (p0, x0 - svcntd () * 3))
+
+/*
+** ld3q_s64_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s64_m24, svint64x3_t, int64_t,
+ z0 = svld3q_s64 (p0, x0 - svcntd () * 24),
+ z0 = svld3q (p0, x0 - svcntd () * 24))
+
+/*
+** ld3q_s64_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s64_m27, svint64x3_t, int64_t,
+ z0 = svld3q_s64 (p0, x0 - svcntd () * 27),
+ z0 = svld3q (p0, x0 - svcntd () * 27))
+
+/*
+** ld3q_vnum_s64_0:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s64_0, svint64x3_t, int64_t,
+ z0 = svld3q_vnum_s64 (p0, x0, 0),
+ z0 = svld3q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s64_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s64_1, svint64x3_t, int64_t,
+ z0 = svld3q_vnum_s64 (p0, x0, 1),
+ z0 = svld3q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s64_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s64_2, svint64x3_t, int64_t,
+ z0 = svld3q_vnum_s64 (p0, x0, 2),
+ z0 = svld3q_vnum (p0, x0, 2))
+
+/*
+** ld3q_vnum_s64_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s64_3, svint64x3_t, int64_t,
+ z0 = svld3q_vnum_s64 (p0, x0, 3),
+ z0 = svld3q_vnum (p0, x0, 3))
+
+/*
+** ld3q_vnum_s64_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s64_21, svint64x3_t, int64_t,
+ z0 = svld3q_vnum_s64 (p0, x0, 21),
+ z0 = svld3q_vnum (p0, x0, 21))
+
+/*
+** ld3q_vnum_s64_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s64_24, svint64x3_t, int64_t,
+ z0 = svld3q_vnum_s64 (p0, x0, 24),
+ z0 = svld3q_vnum (p0, x0, 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s64_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s64_m1, svint64x3_t, int64_t,
+ z0 = svld3q_vnum_s64 (p0, x0, -1),
+ z0 = svld3q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s64_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s64_m2, svint64x3_t, int64_t,
+ z0 = svld3q_vnum_s64 (p0, x0, -2),
+ z0 = svld3q_vnum (p0, x0, -2))
+
+/*
+** ld3q_vnum_s64_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s64_m3, svint64x3_t, int64_t,
+ z0 = svld3q_vnum_s64 (p0, x0, -3),
+ z0 = svld3q_vnum (p0, x0, -3))
+
+/*
+** ld3q_vnum_s64_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s64_m24, svint64x3_t, int64_t,
+ z0 = svld3q_vnum_s64 (p0, x0, -24),
+ z0 = svld3q_vnum (p0, x0, -24))
+
+/*
+** ld3q_vnum_s64_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s64_m27, svint64x3_t, int64_t,
+ z0 = svld3q_vnum_s64 (p0, x0, -27),
+ z0 = svld3q_vnum (p0, x0, -27))
+
+/*
+** ld3q_vnum_s64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s64_x1, svint64x3_t, int64_t,
+ z0 = svld3q_vnum_s64 (p0, x0, x1),
+ z0 = svld3q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s8.c
new file mode 100644
index 0000000..f276d2c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_s8.c
@@ -0,0 +1,291 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld3q_s8_base:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_base, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0),
+ z0 = svld3q (p0, x0))
+
+/*
+** ld3q_s8_index:
+** add (x[0-9]), (?:x0, x1|x1, x0)
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_index, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 + x1),
+ z0 = svld3q (p0, x0 + x1))
+
+/*
+** ld3q_s8_index2:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_index2, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 + x1 * 2),
+ z0 = svld3q (p0, x0 + x1 * 2))
+
+/*
+** ld3q_s8_index4:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_index4, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 + x1 * 4),
+ z0 = svld3q (p0, x0 + x1 * 4))
+
+/*
+** ld3q_s8_index8:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_index8, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 + x1 * 8),
+ z0 = svld3q (p0, x0 + x1 * 8))
+
+/*
+** ld3q_s8_index16:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_index16, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 + x1 * 16),
+ z0 = svld3q (p0, x0 + x1 * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s8_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_1, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 + svcntb ()),
+ z0 = svld3q (p0, x0 + svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s8_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_2, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 + svcntb () * 2),
+ z0 = svld3q (p0, x0 + svcntb () * 2))
+
+/*
+** ld3q_s8_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_3, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 + svcntb () * 3),
+ z0 = svld3q (p0, x0 + svcntb () * 3))
+
+/*
+** ld3q_s8_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_21, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 + svcntb () * 21),
+ z0 = svld3q (p0, x0 + svcntb () * 21))
+
+/*
+** ld3q_s8_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_24, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 + svcntb () * 24),
+ z0 = svld3q (p0, x0 + svcntb () * 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s8_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_m1, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 - svcntb ()),
+ z0 = svld3q (p0, x0 - svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_s8_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_m2, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 - svcntb () * 2),
+ z0 = svld3q (p0, x0 - svcntb () * 2))
+
+/*
+** ld3q_s8_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_m3, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 - svcntb () * 3),
+ z0 = svld3q (p0, x0 - svcntb () * 3))
+
+/*
+** ld3q_s8_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_m24, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 - svcntb () * 24),
+ z0 = svld3q (p0, x0 - svcntb () * 24))
+
+/*
+** ld3q_s8_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_s8_m27, svint8x3_t, int8_t,
+ z0 = svld3q_s8 (p0, x0 - svcntb () * 27),
+ z0 = svld3q (p0, x0 - svcntb () * 27))
+
+/*
+** ld3q_vnum_s8_0:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s8_0, svint8x3_t, int8_t,
+ z0 = svld3q_vnum_s8 (p0, x0, 0),
+ z0 = svld3q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s8_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s8_1, svint8x3_t, int8_t,
+ z0 = svld3q_vnum_s8 (p0, x0, 1),
+ z0 = svld3q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s8_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s8_2, svint8x3_t, int8_t,
+ z0 = svld3q_vnum_s8 (p0, x0, 2),
+ z0 = svld3q_vnum (p0, x0, 2))
+
+/*
+** ld3q_vnum_s8_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s8_3, svint8x3_t, int8_t,
+ z0 = svld3q_vnum_s8 (p0, x0, 3),
+ z0 = svld3q_vnum (p0, x0, 3))
+
+/*
+** ld3q_vnum_s8_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s8_21, svint8x3_t, int8_t,
+ z0 = svld3q_vnum_s8 (p0, x0, 21),
+ z0 = svld3q_vnum (p0, x0, 21))
+
+/*
+** ld3q_vnum_s8_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s8_24, svint8x3_t, int8_t,
+ z0 = svld3q_vnum_s8 (p0, x0, 24),
+ z0 = svld3q_vnum (p0, x0, 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s8_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s8_m1, svint8x3_t, int8_t,
+ z0 = svld3q_vnum_s8 (p0, x0, -1),
+ z0 = svld3q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_s8_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s8_m2, svint8x3_t, int8_t,
+ z0 = svld3q_vnum_s8 (p0, x0, -2),
+ z0 = svld3q_vnum (p0, x0, -2))
+
+/*
+** ld3q_vnum_s8_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s8_m3, svint8x3_t, int8_t,
+ z0 = svld3q_vnum_s8 (p0, x0, -3),
+ z0 = svld3q_vnum (p0, x0, -3))
+
+/*
+** ld3q_vnum_s8_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s8_m24, svint8x3_t, int8_t,
+ z0 = svld3q_vnum_s8 (p0, x0, -24),
+ z0 = svld3q_vnum (p0, x0, -24))
+
+/*
+** ld3q_vnum_s8_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s8_m27, svint8x3_t, int8_t,
+ z0 = svld3q_vnum_s8 (p0, x0, -27),
+ z0 = svld3q_vnum (p0, x0, -27))
+
+/*
+** ld3q_vnum_s8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld3q_vnum_s8_x1, svint8x3_t, int8_t,
+ z0 = svld3q_vnum_s8 (p0, x0, x1),
+ z0 = svld3q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u16.c
new file mode 100644
index 0000000..6ce61eb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u16.c
@@ -0,0 +1,281 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld3q_u16_base:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_base, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0),
+ z0 = svld3q (p0, x0))
+
+/*
+** ld3q_u16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_index, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 + x1),
+ z0 = svld3q (p0, x0 + x1))
+
+/*
+** ld3q_u16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_index2, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 + x1 * 2),
+ z0 = svld3q (p0, x0 + x1 * 2))
+
+/*
+** ld3q_u16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_index4, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 + x1 * 4),
+ z0 = svld3q (p0, x0 + x1 * 4))
+
+/*
+** ld3q_u16_index8:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_index8, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 + x1 * 8),
+ z0 = svld3q (p0, x0 + x1 * 8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u16_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_1, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 + svcnth ()),
+ z0 = svld3q (p0, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u16_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_2, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 + svcnth () * 2),
+ z0 = svld3q (p0, x0 + svcnth () * 2))
+
+/*
+** ld3q_u16_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_3, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 + svcnth () * 3),
+ z0 = svld3q (p0, x0 + svcnth () * 3))
+
+/*
+** ld3q_u16_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_21, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 + svcnth () * 21),
+ z0 = svld3q (p0, x0 + svcnth () * 21))
+
+/*
+** ld3q_u16_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_24, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 + svcnth () * 24),
+ z0 = svld3q (p0, x0 + svcnth () * 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u16_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_m1, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 - svcnth ()),
+ z0 = svld3q (p0, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u16_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_m2, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 - svcnth () * 2),
+ z0 = svld3q (p0, x0 - svcnth () * 2))
+
+/*
+** ld3q_u16_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_m3, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 - svcnth () * 3),
+ z0 = svld3q (p0, x0 - svcnth () * 3))
+
+/*
+** ld3q_u16_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_m24, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 - svcnth () * 24),
+ z0 = svld3q (p0, x0 - svcnth () * 24))
+
+/*
+** ld3q_u16_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u16_m27, svuint16x3_t, uint16_t,
+ z0 = svld3q_u16 (p0, x0 - svcnth () * 27),
+ z0 = svld3q (p0, x0 - svcnth () * 27))
+
+/*
+** ld3q_vnum_u16_0:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u16_0, svuint16x3_t, uint16_t,
+ z0 = svld3q_vnum_u16 (p0, x0, 0),
+ z0 = svld3q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u16_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u16_1, svuint16x3_t, uint16_t,
+ z0 = svld3q_vnum_u16 (p0, x0, 1),
+ z0 = svld3q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u16_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u16_2, svuint16x3_t, uint16_t,
+ z0 = svld3q_vnum_u16 (p0, x0, 2),
+ z0 = svld3q_vnum (p0, x0, 2))
+
+/*
+** ld3q_vnum_u16_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u16_3, svuint16x3_t, uint16_t,
+ z0 = svld3q_vnum_u16 (p0, x0, 3),
+ z0 = svld3q_vnum (p0, x0, 3))
+
+/*
+** ld3q_vnum_u16_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u16_21, svuint16x3_t, uint16_t,
+ z0 = svld3q_vnum_u16 (p0, x0, 21),
+ z0 = svld3q_vnum (p0, x0, 21))
+
+/*
+** ld3q_vnum_u16_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u16_24, svuint16x3_t, uint16_t,
+ z0 = svld3q_vnum_u16 (p0, x0, 24),
+ z0 = svld3q_vnum (p0, x0, 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u16_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u16_m1, svuint16x3_t, uint16_t,
+ z0 = svld3q_vnum_u16 (p0, x0, -1),
+ z0 = svld3q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u16_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u16_m2, svuint16x3_t, uint16_t,
+ z0 = svld3q_vnum_u16 (p0, x0, -2),
+ z0 = svld3q_vnum (p0, x0, -2))
+
+/*
+** ld3q_vnum_u16_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u16_m3, svuint16x3_t, uint16_t,
+ z0 = svld3q_vnum_u16 (p0, x0, -3),
+ z0 = svld3q_vnum (p0, x0, -3))
+
+/*
+** ld3q_vnum_u16_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u16_m24, svuint16x3_t, uint16_t,
+ z0 = svld3q_vnum_u16 (p0, x0, -24),
+ z0 = svld3q_vnum (p0, x0, -24))
+
+/*
+** ld3q_vnum_u16_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u16_m27, svuint16x3_t, uint16_t,
+ z0 = svld3q_vnum_u16 (p0, x0, -27),
+ z0 = svld3q_vnum (p0, x0, -27))
+
+/*
+** ld3q_vnum_u16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u16_x1, svuint16x3_t, uint16_t,
+ z0 = svld3q_vnum_u16 (p0, x0, x1),
+ z0 = svld3q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u32.c
new file mode 100644
index 0000000..7ffb5fa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u32.c
@@ -0,0 +1,271 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld3q_u32_base:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_base, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0),
+ z0 = svld3q (p0, x0))
+
+/*
+** ld3q_u32_index:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_index, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0 + x1),
+ z0 = svld3q (p0, x0 + x1))
+
+/*
+** ld3q_u32_index2:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_index2, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0 + x1 * 2),
+ z0 = svld3q (p0, x0 + x1 * 2))
+
+/*
+** ld3q_u32_index4:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_index4, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0 + x1 * 4),
+ z0 = svld3q (p0, x0 + x1 * 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u32_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_1, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0 + svcntw ()),
+ z0 = svld3q (p0, x0 + svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u32_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_2, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0 + svcntw () * 2),
+ z0 = svld3q (p0, x0 + svcntw () * 2))
+
+/*
+** ld3q_u32_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_3, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0 + svcntw () * 3),
+ z0 = svld3q (p0, x0 + svcntw () * 3))
+
+/*
+** ld3q_u32_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_21, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0 + svcntw () * 21),
+ z0 = svld3q (p0, x0 + svcntw () * 21))
+
+/*
+** ld3q_u32_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_24, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0 + svcntw () * 24),
+ z0 = svld3q (p0, x0 + svcntw () * 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u32_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_m1, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0 - svcntw ()),
+ z0 = svld3q (p0, x0 - svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u32_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_m2, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0 - svcntw () * 2),
+ z0 = svld3q (p0, x0 - svcntw () * 2))
+
+/*
+** ld3q_u32_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_m3, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0 - svcntw () * 3),
+ z0 = svld3q (p0, x0 - svcntw () * 3))
+
+/*
+** ld3q_u32_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_m24, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0 - svcntw () * 24),
+ z0 = svld3q (p0, x0 - svcntw () * 24))
+
+/*
+** ld3q_u32_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u32_m27, svuint32x3_t, uint32_t,
+ z0 = svld3q_u32 (p0, x0 - svcntw () * 27),
+ z0 = svld3q (p0, x0 - svcntw () * 27))
+
+/*
+** ld3q_vnum_u32_0:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u32_0, svuint32x3_t, uint32_t,
+ z0 = svld3q_vnum_u32 (p0, x0, 0),
+ z0 = svld3q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u32_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u32_1, svuint32x3_t, uint32_t,
+ z0 = svld3q_vnum_u32 (p0, x0, 1),
+ z0 = svld3q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u32_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u32_2, svuint32x3_t, uint32_t,
+ z0 = svld3q_vnum_u32 (p0, x0, 2),
+ z0 = svld3q_vnum (p0, x0, 2))
+
+/*
+** ld3q_vnum_u32_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u32_3, svuint32x3_t, uint32_t,
+ z0 = svld3q_vnum_u32 (p0, x0, 3),
+ z0 = svld3q_vnum (p0, x0, 3))
+
+/*
+** ld3q_vnum_u32_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u32_21, svuint32x3_t, uint32_t,
+ z0 = svld3q_vnum_u32 (p0, x0, 21),
+ z0 = svld3q_vnum (p0, x0, 21))
+
+/*
+** ld3q_vnum_u32_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u32_24, svuint32x3_t, uint32_t,
+ z0 = svld3q_vnum_u32 (p0, x0, 24),
+ z0 = svld3q_vnum (p0, x0, 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u32_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u32_m1, svuint32x3_t, uint32_t,
+ z0 = svld3q_vnum_u32 (p0, x0, -1),
+ z0 = svld3q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u32_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u32_m2, svuint32x3_t, uint32_t,
+ z0 = svld3q_vnum_u32 (p0, x0, -2),
+ z0 = svld3q_vnum (p0, x0, -2))
+
+/*
+** ld3q_vnum_u32_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u32_m3, svuint32x3_t, uint32_t,
+ z0 = svld3q_vnum_u32 (p0, x0, -3),
+ z0 = svld3q_vnum (p0, x0, -3))
+
+/*
+** ld3q_vnum_u32_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u32_m24, svuint32x3_t, uint32_t,
+ z0 = svld3q_vnum_u32 (p0, x0, -24),
+ z0 = svld3q_vnum (p0, x0, -24))
+
+/*
+** ld3q_vnum_u32_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u32_m27, svuint32x3_t, uint32_t,
+ z0 = svld3q_vnum_u32 (p0, x0, -27),
+ z0 = svld3q_vnum (p0, x0, -27))
+
+/*
+** ld3q_vnum_u32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u32_x1, svuint32x3_t, uint32_t,
+ z0 = svld3q_vnum_u32 (p0, x0, x1),
+ z0 = svld3q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u64.c
new file mode 100644
index 0000000..afe1d32
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u64.c
@@ -0,0 +1,261 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld3q_u64_base:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u64_base, svuint64x3_t, uint64_t,
+ z0 = svld3q_u64 (p0, x0),
+ z0 = svld3q (p0, x0))
+
+/*
+** ld3q_u64_index:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u64_index, svuint64x3_t, uint64_t,
+ z0 = svld3q_u64 (p0, x0 + x1),
+ z0 = svld3q (p0, x0 + x1))
+
+/*
+** ld3q_u64_index2:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld3q_u64_index2, svuint64x3_t, uint64_t,
+ z0 = svld3q_u64 (p0, x0 + x1 * 2),
+ z0 = svld3q (p0, x0 + x1 * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u64_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u64_1, svuint64x3_t, uint64_t,
+ z0 = svld3q_u64 (p0, x0 + svcntd ()),
+ z0 = svld3q (p0, x0 + svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u64_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u64_2, svuint64x3_t, uint64_t,
+ z0 = svld3q_u64 (p0, x0 + svcntd () * 2),
+ z0 = svld3q (p0, x0 + svcntd () * 2))
+
+/*
+** ld3q_u64_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u64_3, svuint64x3_t, uint64_t,
+ z0 = svld3q_u64 (p0, x0 + svcntd () * 3),
+ z0 = svld3q (p0, x0 + svcntd () * 3))
+
+/*
+** ld3q_u64_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u64_21, svuint64x3_t, uint64_t,
+ z0 = svld3q_u64 (p0, x0 + svcntd () * 21),
+ z0 = svld3q (p0, x0 + svcntd () * 21))
+
+/*
+** ld3q_u64_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u64_24, svuint64x3_t, uint64_t,
+ z0 = svld3q_u64 (p0, x0 + svcntd () * 24),
+ z0 = svld3q (p0, x0 + svcntd () * 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u64_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u64_m1, svuint64x3_t, uint64_t,
+ z0 = svld3q_u64 (p0, x0 - svcntd ()),
+ z0 = svld3q (p0, x0 - svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u64_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u64_m2, svuint64x3_t, uint64_t,
+ z0 = svld3q_u64 (p0, x0 - svcntd () * 2),
+ z0 = svld3q (p0, x0 - svcntd () * 2))
+
+/*
+** ld3q_u64_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u64_m3, svuint64x3_t, uint64_t,
+ z0 = svld3q_u64 (p0, x0 - svcntd () * 3),
+ z0 = svld3q (p0, x0 - svcntd () * 3))
+
+/*
+** ld3q_u64_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u64_m24, svuint64x3_t, uint64_t,
+ z0 = svld3q_u64 (p0, x0 - svcntd () * 24),
+ z0 = svld3q (p0, x0 - svcntd () * 24))
+
+/*
+** ld3q_u64_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u64_m27, svuint64x3_t, uint64_t,
+ z0 = svld3q_u64 (p0, x0 - svcntd () * 27),
+ z0 = svld3q (p0, x0 - svcntd () * 27))
+
+/*
+** ld3q_vnum_u64_0:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u64_0, svuint64x3_t, uint64_t,
+ z0 = svld3q_vnum_u64 (p0, x0, 0),
+ z0 = svld3q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u64_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u64_1, svuint64x3_t, uint64_t,
+ z0 = svld3q_vnum_u64 (p0, x0, 1),
+ z0 = svld3q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u64_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u64_2, svuint64x3_t, uint64_t,
+ z0 = svld3q_vnum_u64 (p0, x0, 2),
+ z0 = svld3q_vnum (p0, x0, 2))
+
+/*
+** ld3q_vnum_u64_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u64_3, svuint64x3_t, uint64_t,
+ z0 = svld3q_vnum_u64 (p0, x0, 3),
+ z0 = svld3q_vnum (p0, x0, 3))
+
+/*
+** ld3q_vnum_u64_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u64_21, svuint64x3_t, uint64_t,
+ z0 = svld3q_vnum_u64 (p0, x0, 21),
+ z0 = svld3q_vnum (p0, x0, 21))
+
+/*
+** ld3q_vnum_u64_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u64_24, svuint64x3_t, uint64_t,
+ z0 = svld3q_vnum_u64 (p0, x0, 24),
+ z0 = svld3q_vnum (p0, x0, 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u64_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u64_m1, svuint64x3_t, uint64_t,
+ z0 = svld3q_vnum_u64 (p0, x0, -1),
+ z0 = svld3q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u64_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u64_m2, svuint64x3_t, uint64_t,
+ z0 = svld3q_vnum_u64 (p0, x0, -2),
+ z0 = svld3q_vnum (p0, x0, -2))
+
+/*
+** ld3q_vnum_u64_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u64_m3, svuint64x3_t, uint64_t,
+ z0 = svld3q_vnum_u64 (p0, x0, -3),
+ z0 = svld3q_vnum (p0, x0, -3))
+
+/*
+** ld3q_vnum_u64_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u64_m24, svuint64x3_t, uint64_t,
+ z0 = svld3q_vnum_u64 (p0, x0, -24),
+ z0 = svld3q_vnum (p0, x0, -24))
+
+/*
+** ld3q_vnum_u64_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u64_m27, svuint64x3_t, uint64_t,
+ z0 = svld3q_vnum_u64 (p0, x0, -27),
+ z0 = svld3q_vnum (p0, x0, -27))
+
+/*
+** ld3q_vnum_u64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u64_x1, svuint64x3_t, uint64_t,
+ z0 = svld3q_vnum_u64 (p0, x0, x1),
+ z0 = svld3q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u8.c
new file mode 100644
index 0000000..6d3d33f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld3q_u8.c
@@ -0,0 +1,291 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld3q_u8_base:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_base, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0),
+ z0 = svld3q (p0, x0))
+
+/*
+** ld3q_u8_index:
+** add (x[0-9]), (?:x0, x1|x1, x0)
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_index, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 + x1),
+ z0 = svld3q (p0, x0 + x1))
+
+/*
+** ld3q_u8_index2:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_index2, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 + x1 * 2),
+ z0 = svld3q (p0, x0 + x1 * 2))
+
+/*
+** ld3q_u8_index4:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_index4, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 + x1 * 4),
+ z0 = svld3q (p0, x0 + x1 * 4))
+
+/*
+** ld3q_u8_index8:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_index8, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 + x1 * 8),
+ z0 = svld3q (p0, x0 + x1 * 8))
+
+/*
+** ld3q_u8_index16:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_index16, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 + x1 * 16),
+ z0 = svld3q (p0, x0 + x1 * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u8_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_1, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 + svcntb ()),
+ z0 = svld3q (p0, x0 + svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u8_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_2, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 + svcntb () * 2),
+ z0 = svld3q (p0, x0 + svcntb () * 2))
+
+/*
+** ld3q_u8_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_3, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 + svcntb () * 3),
+ z0 = svld3q (p0, x0 + svcntb () * 3))
+
+/*
+** ld3q_u8_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_21, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 + svcntb () * 21),
+ z0 = svld3q (p0, x0 + svcntb () * 21))
+
+/*
+** ld3q_u8_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_24, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 + svcntb () * 24),
+ z0 = svld3q (p0, x0 + svcntb () * 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u8_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_m1, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 - svcntb ()),
+ z0 = svld3q (p0, x0 - svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_u8_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_m2, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 - svcntb () * 2),
+ z0 = svld3q (p0, x0 - svcntb () * 2))
+
+/*
+** ld3q_u8_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_m3, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 - svcntb () * 3),
+ z0 = svld3q (p0, x0 - svcntb () * 3))
+
+/*
+** ld3q_u8_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_m24, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 - svcntb () * 24),
+ z0 = svld3q (p0, x0 - svcntb () * 24))
+
+/*
+** ld3q_u8_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_u8_m27, svuint8x3_t, uint8_t,
+ z0 = svld3q_u8 (p0, x0 - svcntb () * 27),
+ z0 = svld3q (p0, x0 - svcntb () * 27))
+
+/*
+** ld3q_vnum_u8_0:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u8_0, svuint8x3_t, uint8_t,
+ z0 = svld3q_vnum_u8 (p0, x0, 0),
+ z0 = svld3q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u8_1:
+** incb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u8_1, svuint8x3_t, uint8_t,
+ z0 = svld3q_vnum_u8 (p0, x0, 1),
+ z0 = svld3q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u8_2:
+** incb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u8_2, svuint8x3_t, uint8_t,
+ z0 = svld3q_vnum_u8 (p0, x0, 2),
+ z0 = svld3q_vnum (p0, x0, 2))
+
+/*
+** ld3q_vnum_u8_3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u8_3, svuint8x3_t, uint8_t,
+ z0 = svld3q_vnum_u8 (p0, x0, 3),
+ z0 = svld3q_vnum (p0, x0, 3))
+
+/*
+** ld3q_vnum_u8_21:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u8_21, svuint8x3_t, uint8_t,
+ z0 = svld3q_vnum_u8 (p0, x0, 21),
+ z0 = svld3q_vnum (p0, x0, 21))
+
+/*
+** ld3q_vnum_u8_24:
+** addvl (x[0-9]+), x0, #24
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u8_24, svuint8x3_t, uint8_t,
+ z0 = svld3q_vnum_u8 (p0, x0, 24),
+ z0 = svld3q_vnum (p0, x0, 24))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u8_m1:
+** decb x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u8_m1, svuint8x3_t, uint8_t,
+ z0 = svld3q_vnum_u8 (p0, x0, -1),
+ z0 = svld3q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld3q_vnum_u8_m2:
+** decb x0, all, mul #2
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u8_m2, svuint8x3_t, uint8_t,
+ z0 = svld3q_vnum_u8 (p0, x0, -2),
+ z0 = svld3q_vnum (p0, x0, -2))
+
+/*
+** ld3q_vnum_u8_m3:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u8_m3, svuint8x3_t, uint8_t,
+ z0 = svld3q_vnum_u8 (p0, x0, -3),
+ z0 = svld3q_vnum (p0, x0, -3))
+
+/*
+** ld3q_vnum_u8_m24:
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u8_m24, svuint8x3_t, uint8_t,
+ z0 = svld3q_vnum_u8 (p0, x0, -24),
+ z0 = svld3q_vnum (p0, x0, -24))
+
+/*
+** ld3q_vnum_u8_m27:
+** addvl (x[0-9]+), x0, #-27
+** ld3q {z0\.q - z2\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u8_m27, svuint8x3_t, uint8_t,
+ z0 = svld3q_vnum_u8 (p0, x0, -27),
+ z0 = svld3q_vnum (p0, x0, -27))
+
+/*
+** ld3q_vnum_u8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld3q {z0\.q - z2\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld3q {z0\.q - z2\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld3q_vnum_u8_x1, svuint8x3_t, uint8_t,
+ z0 = svld3q_vnum_u8 (p0, x0, x1),
+ z0 = svld3q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_bf16.c
new file mode 100644
index 0000000..53af4a7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_bf16.c
@@ -0,0 +1,325 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld4q_bf16_base:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_base, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0),
+ z0 = svld4q (p0, x0))
+
+/*
+** ld4q_bf16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_index, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 + x1),
+ z0 = svld4q (p0, x0 + x1))
+
+/*
+** ld4q_bf16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_index2, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 + x1 * 2),
+ z0 = svld4q (p0, x0 + x1 * 2))
+
+/*
+** ld4q_bf16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_index4, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 + x1 * 4),
+ z0 = svld4q (p0, x0 + x1 * 4))
+
+/*
+** ld4q_bf16_index8:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_index8, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 + x1 * 8),
+ z0 = svld4q (p0, x0 + x1 * 8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_bf16_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_1, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 + svcnth ()),
+ z0 = svld4q (p0, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_bf16_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_2, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 + svcnth () * 2),
+ z0 = svld4q (p0, x0 + svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_bf16_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_3, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 + svcnth () * 3),
+ z0 = svld4q (p0, x0 + svcnth () * 3))
+
+/*
+** ld4q_bf16_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_4, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 + svcnth () * 4),
+ z0 = svld4q (p0, x0 + svcnth () * 4))
+
+/*
+** ld4q_bf16_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_28, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 + svcnth () * 28),
+ z0 = svld4q (p0, x0 + svcnth () * 28))
+
+/*
+** ld4q_bf16_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_32, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 + svcnth () * 32),
+ z0 = svld4q (p0, x0 + svcnth () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_bf16_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_m1, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 - svcnth ()),
+ z0 = svld4q (p0, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_bf16_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_m2, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 - svcnth () * 2),
+ z0 = svld4q (p0, x0 - svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_bf16_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_m3, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 - svcnth () * 3),
+ z0 = svld4q (p0, x0 - svcnth () * 3))
+
+/*
+** ld4q_bf16_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_m4, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 - svcnth () * 4),
+ z0 = svld4q (p0, x0 - svcnth () * 4))
+
+/*
+** ld4q_bf16_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_m32, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 - svcnth () * 32),
+ z0 = svld4q (p0, x0 - svcnth () * 32))
+
+/*
+** ld4q_bf16_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_bf16_m36, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_bf16 (p0, x0 - svcnth () * 36),
+ z0 = svld4q (p0, x0 - svcnth () * 36))
+
+/*
+** ld4q_vnum_bf16_0:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_0, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, 0),
+ z0 = svld4q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_bf16_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_1, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, 1),
+ z0 = svld4q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_bf16_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_2, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, 2),
+ z0 = svld4q_vnum (p0, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_bf16_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_3, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, 3),
+ z0 = svld4q_vnum (p0, x0, 3))
+
+/*
+** ld4q_vnum_bf16_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_4, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, 4),
+ z0 = svld4q_vnum (p0, x0, 4))
+
+/*
+** ld4q_vnum_bf16_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_28, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, 28),
+ z0 = svld4q_vnum (p0, x0, 28))
+
+/*
+** ld4q_vnum_bf16_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_32, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, 32),
+ z0 = svld4q_vnum (p0, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_bf16_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_m1, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, -1),
+ z0 = svld4q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_bf16_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_m2, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, -2),
+ z0 = svld4q_vnum (p0, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_bf16_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_m3, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, -3),
+ z0 = svld4q_vnum (p0, x0, -3))
+
+/*
+** ld4q_vnum_bf16_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_m4, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, -4),
+ z0 = svld4q_vnum (p0, x0, -4))
+
+/*
+** ld4q_vnum_bf16_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_m32, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, -32),
+ z0 = svld4q_vnum (p0, x0, -32))
+
+/*
+** ld4q_vnum_bf16_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_m36, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, -36),
+ z0 = svld4q_vnum (p0, x0, -36))
+
+/*
+** ld4q_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld4q_vnum_bf16_x1, svbfloat16x4_t, bfloat16_t,
+ z0 = svld4q_vnum_bf16 (p0, x0, x1),
+ z0 = svld4q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_f16.c
new file mode 100644
index 0000000..770114e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_f16.c
@@ -0,0 +1,325 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld4q_f16_base:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_base, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0),
+ z0 = svld4q (p0, x0))
+
+/*
+** ld4q_f16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_index, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 + x1),
+ z0 = svld4q (p0, x0 + x1))
+
+/*
+** ld4q_f16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_index2, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 + x1 * 2),
+ z0 = svld4q (p0, x0 + x1 * 2))
+
+/*
+** ld4q_f16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_index4, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 + x1 * 4),
+ z0 = svld4q (p0, x0 + x1 * 4))
+
+/*
+** ld4q_f16_index8:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_index8, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 + x1 * 8),
+ z0 = svld4q (p0, x0 + x1 * 8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f16_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_1, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 + svcnth ()),
+ z0 = svld4q (p0, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f16_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_2, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 + svcnth () * 2),
+ z0 = svld4q (p0, x0 + svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f16_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_3, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 + svcnth () * 3),
+ z0 = svld4q (p0, x0 + svcnth () * 3))
+
+/*
+** ld4q_f16_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_4, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 + svcnth () * 4),
+ z0 = svld4q (p0, x0 + svcnth () * 4))
+
+/*
+** ld4q_f16_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_28, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 + svcnth () * 28),
+ z0 = svld4q (p0, x0 + svcnth () * 28))
+
+/*
+** ld4q_f16_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_32, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 + svcnth () * 32),
+ z0 = svld4q (p0, x0 + svcnth () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f16_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_m1, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 - svcnth ()),
+ z0 = svld4q (p0, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f16_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_m2, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 - svcnth () * 2),
+ z0 = svld4q (p0, x0 - svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f16_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_m3, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 - svcnth () * 3),
+ z0 = svld4q (p0, x0 - svcnth () * 3))
+
+/*
+** ld4q_f16_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_m4, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 - svcnth () * 4),
+ z0 = svld4q (p0, x0 - svcnth () * 4))
+
+/*
+** ld4q_f16_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_m32, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 - svcnth () * 32),
+ z0 = svld4q (p0, x0 - svcnth () * 32))
+
+/*
+** ld4q_f16_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_f16_m36, svfloat16x4_t, float16_t,
+ z0 = svld4q_f16 (p0, x0 - svcnth () * 36),
+ z0 = svld4q (p0, x0 - svcnth () * 36))
+
+/*
+** ld4q_vnum_f16_0:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_0, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, 0),
+ z0 = svld4q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f16_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_1, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, 1),
+ z0 = svld4q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f16_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_2, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, 2),
+ z0 = svld4q_vnum (p0, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f16_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_3, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, 3),
+ z0 = svld4q_vnum (p0, x0, 3))
+
+/*
+** ld4q_vnum_f16_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_4, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, 4),
+ z0 = svld4q_vnum (p0, x0, 4))
+
+/*
+** ld4q_vnum_f16_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_28, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, 28),
+ z0 = svld4q_vnum (p0, x0, 28))
+
+/*
+** ld4q_vnum_f16_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_32, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, 32),
+ z0 = svld4q_vnum (p0, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f16_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_m1, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, -1),
+ z0 = svld4q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f16_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_m2, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, -2),
+ z0 = svld4q_vnum (p0, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f16_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_m3, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, -3),
+ z0 = svld4q_vnum (p0, x0, -3))
+
+/*
+** ld4q_vnum_f16_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_m4, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, -4),
+ z0 = svld4q_vnum (p0, x0, -4))
+
+/*
+** ld4q_vnum_f16_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_m32, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, -32),
+ z0 = svld4q_vnum (p0, x0, -32))
+
+/*
+** ld4q_vnum_f16_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_m36, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, -36),
+ z0 = svld4q_vnum (p0, x0, -36))
+
+/*
+** ld4q_vnum_f16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f16_x1, svfloat16x4_t, float16_t,
+ z0 = svld4q_vnum_f16 (p0, x0, x1),
+ z0 = svld4q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_f32.c
new file mode 100644
index 0000000..ba42d2a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_f32.c
@@ -0,0 +1,315 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld4q_f32_base:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_base, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0),
+ z0 = svld4q (p0, x0))
+
+/*
+** ld4q_f32_index:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_index, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 + x1),
+ z0 = svld4q (p0, x0 + x1))
+
+/*
+** ld4q_f32_index2:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_index2, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 + x1 * 2),
+ z0 = svld4q (p0, x0 + x1 * 2))
+
+/*
+** ld4q_f32_index4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_index4, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 + x1 * 4),
+ z0 = svld4q (p0, x0 + x1 * 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f32_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_1, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 + svcntw ()),
+ z0 = svld4q (p0, x0 + svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f32_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_2, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 + svcntw () * 2),
+ z0 = svld4q (p0, x0 + svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f32_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_3, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 + svcntw () * 3),
+ z0 = svld4q (p0, x0 + svcntw () * 3))
+
+/*
+** ld4q_f32_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_4, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 + svcntw () * 4),
+ z0 = svld4q (p0, x0 + svcntw () * 4))
+
+/*
+** ld4q_f32_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_28, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 + svcntw () * 28),
+ z0 = svld4q (p0, x0 + svcntw () * 28))
+
+/*
+** ld4q_f32_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_32, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 + svcntw () * 32),
+ z0 = svld4q (p0, x0 + svcntw () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f32_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_m1, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 - svcntw ()),
+ z0 = svld4q (p0, x0 - svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f32_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_m2, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 - svcntw () * 2),
+ z0 = svld4q (p0, x0 - svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f32_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_m3, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 - svcntw () * 3),
+ z0 = svld4q (p0, x0 - svcntw () * 3))
+
+/*
+** ld4q_f32_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_m4, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 - svcntw () * 4),
+ z0 = svld4q (p0, x0 - svcntw () * 4))
+
+/*
+** ld4q_f32_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_m32, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 - svcntw () * 32),
+ z0 = svld4q (p0, x0 - svcntw () * 32))
+
+/*
+** ld4q_f32_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_f32_m36, svfloat32x4_t, float32_t,
+ z0 = svld4q_f32 (p0, x0 - svcntw () * 36),
+ z0 = svld4q (p0, x0 - svcntw () * 36))
+
+/*
+** ld4q_vnum_f32_0:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_0, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, 0),
+ z0 = svld4q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f32_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_1, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, 1),
+ z0 = svld4q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f32_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_2, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, 2),
+ z0 = svld4q_vnum (p0, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f32_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_3, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, 3),
+ z0 = svld4q_vnum (p0, x0, 3))
+
+/*
+** ld4q_vnum_f32_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_4, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, 4),
+ z0 = svld4q_vnum (p0, x0, 4))
+
+/*
+** ld4q_vnum_f32_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_28, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, 28),
+ z0 = svld4q_vnum (p0, x0, 28))
+
+/*
+** ld4q_vnum_f32_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_32, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, 32),
+ z0 = svld4q_vnum (p0, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f32_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_m1, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, -1),
+ z0 = svld4q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f32_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_m2, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, -2),
+ z0 = svld4q_vnum (p0, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f32_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_m3, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, -3),
+ z0 = svld4q_vnum (p0, x0, -3))
+
+/*
+** ld4q_vnum_f32_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_m4, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, -4),
+ z0 = svld4q_vnum (p0, x0, -4))
+
+/*
+** ld4q_vnum_f32_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_m32, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, -32),
+ z0 = svld4q_vnum (p0, x0, -32))
+
+/*
+** ld4q_vnum_f32_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_m36, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, -36),
+ z0 = svld4q_vnum (p0, x0, -36))
+
+/*
+** ld4q_vnum_f32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f32_x1, svfloat32x4_t, float32_t,
+ z0 = svld4q_vnum_f32 (p0, x0, x1),
+ z0 = svld4q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_f64.c
new file mode 100644
index 0000000..c2ac775
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_f64.c
@@ -0,0 +1,305 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld4q_f64_base:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_base, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0),
+ z0 = svld4q (p0, x0))
+
+/*
+** ld4q_f64_index:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_index, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 + x1),
+ z0 = svld4q (p0, x0 + x1))
+
+/*
+** ld4q_f64_index2:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_index2, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 + x1 * 2),
+ z0 = svld4q (p0, x0 + x1 * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f64_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_1, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 + svcntd ()),
+ z0 = svld4q (p0, x0 + svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f64_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_2, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 + svcntd () * 2),
+ z0 = svld4q (p0, x0 + svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f64_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_3, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 + svcntd () * 3),
+ z0 = svld4q (p0, x0 + svcntd () * 3))
+
+/*
+** ld4q_f64_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_4, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 + svcntd () * 4),
+ z0 = svld4q (p0, x0 + svcntd () * 4))
+
+/*
+** ld4q_f64_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_28, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 + svcntd () * 28),
+ z0 = svld4q (p0, x0 + svcntd () * 28))
+
+/*
+** ld4q_f64_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_32, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 + svcntd () * 32),
+ z0 = svld4q (p0, x0 + svcntd () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f64_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_m1, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 - svcntd ()),
+ z0 = svld4q (p0, x0 - svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f64_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_m2, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 - svcntd () * 2),
+ z0 = svld4q (p0, x0 - svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_f64_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_m3, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 - svcntd () * 3),
+ z0 = svld4q (p0, x0 - svcntd () * 3))
+
+/*
+** ld4q_f64_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_m4, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 - svcntd () * 4),
+ z0 = svld4q (p0, x0 - svcntd () * 4))
+
+/*
+** ld4q_f64_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_m32, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 - svcntd () * 32),
+ z0 = svld4q (p0, x0 - svcntd () * 32))
+
+/*
+** ld4q_f64_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_f64_m36, svfloat64x4_t, float64_t,
+ z0 = svld4q_f64 (p0, x0 - svcntd () * 36),
+ z0 = svld4q (p0, x0 - svcntd () * 36))
+
+/*
+** ld4q_vnum_f64_0:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_0, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, 0),
+ z0 = svld4q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f64_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_1, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, 1),
+ z0 = svld4q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f64_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_2, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, 2),
+ z0 = svld4q_vnum (p0, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f64_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_3, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, 3),
+ z0 = svld4q_vnum (p0, x0, 3))
+
+/*
+** ld4q_vnum_f64_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_4, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, 4),
+ z0 = svld4q_vnum (p0, x0, 4))
+
+/*
+** ld4q_vnum_f64_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_28, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, 28),
+ z0 = svld4q_vnum (p0, x0, 28))
+
+/*
+** ld4q_vnum_f64_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_32, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, 32),
+ z0 = svld4q_vnum (p0, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f64_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_m1, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, -1),
+ z0 = svld4q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f64_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_m2, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, -2),
+ z0 = svld4q_vnum (p0, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_f64_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_m3, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, -3),
+ z0 = svld4q_vnum (p0, x0, -3))
+
+/*
+** ld4q_vnum_f64_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_m4, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, -4),
+ z0 = svld4q_vnum (p0, x0, -4))
+
+/*
+** ld4q_vnum_f64_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_m32, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, -32),
+ z0 = svld4q_vnum (p0, x0, -32))
+
+/*
+** ld4q_vnum_f64_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_m36, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, -36),
+ z0 = svld4q_vnum (p0, x0, -36))
+
+/*
+** ld4q_vnum_f64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld4q_vnum_f64_x1, svfloat64x4_t, float64_t,
+ z0 = svld4q_vnum_f64 (p0, x0, x1),
+ z0 = svld4q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s16.c
new file mode 100644
index 0000000..a3a3e41
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s16.c
@@ -0,0 +1,325 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld4q_s16_base:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_base, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0),
+ z0 = svld4q (p0, x0))
+
+/*
+** ld4q_s16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_index, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 + x1),
+ z0 = svld4q (p0, x0 + x1))
+
+/*
+** ld4q_s16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_index2, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 + x1 * 2),
+ z0 = svld4q (p0, x0 + x1 * 2))
+
+/*
+** ld4q_s16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_index4, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 + x1 * 4),
+ z0 = svld4q (p0, x0 + x1 * 4))
+
+/*
+** ld4q_s16_index8:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_index8, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 + x1 * 8),
+ z0 = svld4q (p0, x0 + x1 * 8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s16_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_1, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 + svcnth ()),
+ z0 = svld4q (p0, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s16_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_2, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 + svcnth () * 2),
+ z0 = svld4q (p0, x0 + svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s16_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_3, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 + svcnth () * 3),
+ z0 = svld4q (p0, x0 + svcnth () * 3))
+
+/*
+** ld4q_s16_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_4, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 + svcnth () * 4),
+ z0 = svld4q (p0, x0 + svcnth () * 4))
+
+/*
+** ld4q_s16_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_28, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 + svcnth () * 28),
+ z0 = svld4q (p0, x0 + svcnth () * 28))
+
+/*
+** ld4q_s16_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_32, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 + svcnth () * 32),
+ z0 = svld4q (p0, x0 + svcnth () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s16_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_m1, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 - svcnth ()),
+ z0 = svld4q (p0, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s16_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_m2, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 - svcnth () * 2),
+ z0 = svld4q (p0, x0 - svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s16_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_m3, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 - svcnth () * 3),
+ z0 = svld4q (p0, x0 - svcnth () * 3))
+
+/*
+** ld4q_s16_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_m4, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 - svcnth () * 4),
+ z0 = svld4q (p0, x0 - svcnth () * 4))
+
+/*
+** ld4q_s16_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_m32, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 - svcnth () * 32),
+ z0 = svld4q (p0, x0 - svcnth () * 32))
+
+/*
+** ld4q_s16_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_s16_m36, svint16x4_t, int16_t,
+ z0 = svld4q_s16 (p0, x0 - svcnth () * 36),
+ z0 = svld4q (p0, x0 - svcnth () * 36))
+
+/*
+** ld4q_vnum_s16_0:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_0, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, 0),
+ z0 = svld4q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s16_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_1, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, 1),
+ z0 = svld4q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s16_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_2, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, 2),
+ z0 = svld4q_vnum (p0, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s16_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_3, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, 3),
+ z0 = svld4q_vnum (p0, x0, 3))
+
+/*
+** ld4q_vnum_s16_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_4, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, 4),
+ z0 = svld4q_vnum (p0, x0, 4))
+
+/*
+** ld4q_vnum_s16_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_28, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, 28),
+ z0 = svld4q_vnum (p0, x0, 28))
+
+/*
+** ld4q_vnum_s16_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_32, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, 32),
+ z0 = svld4q_vnum (p0, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s16_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_m1, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, -1),
+ z0 = svld4q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s16_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_m2, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, -2),
+ z0 = svld4q_vnum (p0, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s16_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_m3, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, -3),
+ z0 = svld4q_vnum (p0, x0, -3))
+
+/*
+** ld4q_vnum_s16_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_m4, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, -4),
+ z0 = svld4q_vnum (p0, x0, -4))
+
+/*
+** ld4q_vnum_s16_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_m32, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, -32),
+ z0 = svld4q_vnum (p0, x0, -32))
+
+/*
+** ld4q_vnum_s16_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_m36, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, -36),
+ z0 = svld4q_vnum (p0, x0, -36))
+
+/*
+** ld4q_vnum_s16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s16_x1, svint16x4_t, int16_t,
+ z0 = svld4q_vnum_s16 (p0, x0, x1),
+ z0 = svld4q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s32.c
new file mode 100644
index 0000000..9229c3f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s32.c
@@ -0,0 +1,315 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld4q_s32_base:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_base, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0),
+ z0 = svld4q (p0, x0))
+
+/*
+** ld4q_s32_index:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_index, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 + x1),
+ z0 = svld4q (p0, x0 + x1))
+
+/*
+** ld4q_s32_index2:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_index2, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 + x1 * 2),
+ z0 = svld4q (p0, x0 + x1 * 2))
+
+/*
+** ld4q_s32_index4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_index4, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 + x1 * 4),
+ z0 = svld4q (p0, x0 + x1 * 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s32_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_1, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 + svcntw ()),
+ z0 = svld4q (p0, x0 + svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s32_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_2, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 + svcntw () * 2),
+ z0 = svld4q (p0, x0 + svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s32_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_3, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 + svcntw () * 3),
+ z0 = svld4q (p0, x0 + svcntw () * 3))
+
+/*
+** ld4q_s32_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_4, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 + svcntw () * 4),
+ z0 = svld4q (p0, x0 + svcntw () * 4))
+
+/*
+** ld4q_s32_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_28, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 + svcntw () * 28),
+ z0 = svld4q (p0, x0 + svcntw () * 28))
+
+/*
+** ld4q_s32_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_32, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 + svcntw () * 32),
+ z0 = svld4q (p0, x0 + svcntw () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s32_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_m1, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 - svcntw ()),
+ z0 = svld4q (p0, x0 - svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s32_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_m2, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 - svcntw () * 2),
+ z0 = svld4q (p0, x0 - svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s32_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_m3, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 - svcntw () * 3),
+ z0 = svld4q (p0, x0 - svcntw () * 3))
+
+/*
+** ld4q_s32_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_m4, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 - svcntw () * 4),
+ z0 = svld4q (p0, x0 - svcntw () * 4))
+
+/*
+** ld4q_s32_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_m32, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 - svcntw () * 32),
+ z0 = svld4q (p0, x0 - svcntw () * 32))
+
+/*
+** ld4q_s32_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_s32_m36, svint32x4_t, int32_t,
+ z0 = svld4q_s32 (p0, x0 - svcntw () * 36),
+ z0 = svld4q (p0, x0 - svcntw () * 36))
+
+/*
+** ld4q_vnum_s32_0:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_0, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, 0),
+ z0 = svld4q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s32_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_1, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, 1),
+ z0 = svld4q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s32_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_2, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, 2),
+ z0 = svld4q_vnum (p0, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s32_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_3, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, 3),
+ z0 = svld4q_vnum (p0, x0, 3))
+
+/*
+** ld4q_vnum_s32_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_4, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, 4),
+ z0 = svld4q_vnum (p0, x0, 4))
+
+/*
+** ld4q_vnum_s32_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_28, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, 28),
+ z0 = svld4q_vnum (p0, x0, 28))
+
+/*
+** ld4q_vnum_s32_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_32, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, 32),
+ z0 = svld4q_vnum (p0, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s32_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_m1, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, -1),
+ z0 = svld4q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s32_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_m2, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, -2),
+ z0 = svld4q_vnum (p0, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s32_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_m3, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, -3),
+ z0 = svld4q_vnum (p0, x0, -3))
+
+/*
+** ld4q_vnum_s32_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_m4, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, -4),
+ z0 = svld4q_vnum (p0, x0, -4))
+
+/*
+** ld4q_vnum_s32_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_m32, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, -32),
+ z0 = svld4q_vnum (p0, x0, -32))
+
+/*
+** ld4q_vnum_s32_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_m36, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, -36),
+ z0 = svld4q_vnum (p0, x0, -36))
+
+/*
+** ld4q_vnum_s32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s32_x1, svint32x4_t, int32_t,
+ z0 = svld4q_vnum_s32 (p0, x0, x1),
+ z0 = svld4q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s64.c
new file mode 100644
index 0000000..3d46751
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s64.c
@@ -0,0 +1,305 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld4q_s64_base:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_base, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0),
+ z0 = svld4q (p0, x0))
+
+/*
+** ld4q_s64_index:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_index, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 + x1),
+ z0 = svld4q (p0, x0 + x1))
+
+/*
+** ld4q_s64_index2:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_index2, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 + x1 * 2),
+ z0 = svld4q (p0, x0 + x1 * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s64_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_1, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 + svcntd ()),
+ z0 = svld4q (p0, x0 + svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s64_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_2, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 + svcntd () * 2),
+ z0 = svld4q (p0, x0 + svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s64_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_3, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 + svcntd () * 3),
+ z0 = svld4q (p0, x0 + svcntd () * 3))
+
+/*
+** ld4q_s64_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_4, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 + svcntd () * 4),
+ z0 = svld4q (p0, x0 + svcntd () * 4))
+
+/*
+** ld4q_s64_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_28, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 + svcntd () * 28),
+ z0 = svld4q (p0, x0 + svcntd () * 28))
+
+/*
+** ld4q_s64_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_32, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 + svcntd () * 32),
+ z0 = svld4q (p0, x0 + svcntd () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s64_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_m1, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 - svcntd ()),
+ z0 = svld4q (p0, x0 - svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s64_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_m2, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 - svcntd () * 2),
+ z0 = svld4q (p0, x0 - svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s64_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_m3, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 - svcntd () * 3),
+ z0 = svld4q (p0, x0 - svcntd () * 3))
+
+/*
+** ld4q_s64_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_m4, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 - svcntd () * 4),
+ z0 = svld4q (p0, x0 - svcntd () * 4))
+
+/*
+** ld4q_s64_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_m32, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 - svcntd () * 32),
+ z0 = svld4q (p0, x0 - svcntd () * 32))
+
+/*
+** ld4q_s64_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_s64_m36, svint64x4_t, int64_t,
+ z0 = svld4q_s64 (p0, x0 - svcntd () * 36),
+ z0 = svld4q (p0, x0 - svcntd () * 36))
+
+/*
+** ld4q_vnum_s64_0:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_0, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, 0),
+ z0 = svld4q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s64_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_1, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, 1),
+ z0 = svld4q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s64_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_2, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, 2),
+ z0 = svld4q_vnum (p0, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s64_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_3, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, 3),
+ z0 = svld4q_vnum (p0, x0, 3))
+
+/*
+** ld4q_vnum_s64_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_4, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, 4),
+ z0 = svld4q_vnum (p0, x0, 4))
+
+/*
+** ld4q_vnum_s64_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_28, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, 28),
+ z0 = svld4q_vnum (p0, x0, 28))
+
+/*
+** ld4q_vnum_s64_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_32, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, 32),
+ z0 = svld4q_vnum (p0, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s64_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_m1, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, -1),
+ z0 = svld4q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s64_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_m2, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, -2),
+ z0 = svld4q_vnum (p0, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s64_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_m3, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, -3),
+ z0 = svld4q_vnum (p0, x0, -3))
+
+/*
+** ld4q_vnum_s64_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_m4, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, -4),
+ z0 = svld4q_vnum (p0, x0, -4))
+
+/*
+** ld4q_vnum_s64_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_m32, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, -32),
+ z0 = svld4q_vnum (p0, x0, -32))
+
+/*
+** ld4q_vnum_s64_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_m36, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, -36),
+ z0 = svld4q_vnum (p0, x0, -36))
+
+/*
+** ld4q_vnum_s64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s64_x1, svint64x4_t, int64_t,
+ z0 = svld4q_vnum_s64 (p0, x0, x1),
+ z0 = svld4q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s8.c
new file mode 100644
index 0000000..4ec9ab3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_s8.c
@@ -0,0 +1,335 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld4q_s8_base:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_base, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0),
+ z0 = svld4q (p0, x0))
+
+/*
+** ld4q_s8_index:
+** add (x[0-9]), (?:x0, x1|x1, x0)
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_index, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 + x1),
+ z0 = svld4q (p0, x0 + x1))
+
+/*
+** ld4q_s8_index2:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_index2, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 + x1 * 2),
+ z0 = svld4q (p0, x0 + x1 * 2))
+
+/*
+** ld4q_s8_index4:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_index4, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 + x1 * 4),
+ z0 = svld4q (p0, x0 + x1 * 4))
+
+/*
+** ld4q_s8_index8:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_index8, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 + x1 * 8),
+ z0 = svld4q (p0, x0 + x1 * 8))
+
+/*
+** ld4q_s8_index16:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_index16, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 + x1 * 16),
+ z0 = svld4q (p0, x0 + x1 * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s8_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_1, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 + svcntb ()),
+ z0 = svld4q (p0, x0 + svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s8_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_2, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 + svcntb () * 2),
+ z0 = svld4q (p0, x0 + svcntb () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s8_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_3, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 + svcntb () * 3),
+ z0 = svld4q (p0, x0 + svcntb () * 3))
+
+/*
+** ld4q_s8_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_4, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 + svcntb () * 4),
+ z0 = svld4q (p0, x0 + svcntb () * 4))
+
+/*
+** ld4q_s8_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_28, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 + svcntb () * 28),
+ z0 = svld4q (p0, x0 + svcntb () * 28))
+
+/*
+** ld4q_s8_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_32, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 + svcntb () * 32),
+ z0 = svld4q (p0, x0 + svcntb () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s8_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_m1, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 - svcntb ()),
+ z0 = svld4q (p0, x0 - svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s8_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_m2, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 - svcntb () * 2),
+ z0 = svld4q (p0, x0 - svcntb () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_s8_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_m3, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 - svcntb () * 3),
+ z0 = svld4q (p0, x0 - svcntb () * 3))
+
+/*
+** ld4q_s8_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_m4, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 - svcntb () * 4),
+ z0 = svld4q (p0, x0 - svcntb () * 4))
+
+/*
+** ld4q_s8_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_m32, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 - svcntb () * 32),
+ z0 = svld4q (p0, x0 - svcntb () * 32))
+
+/*
+** ld4q_s8_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_s8_m36, svint8x4_t, int8_t,
+ z0 = svld4q_s8 (p0, x0 - svcntb () * 36),
+ z0 = svld4q (p0, x0 - svcntb () * 36))
+
+/*
+** ld4q_vnum_s8_0:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_0, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, 0),
+ z0 = svld4q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s8_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_1, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, 1),
+ z0 = svld4q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s8_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_2, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, 2),
+ z0 = svld4q_vnum (p0, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s8_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_3, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, 3),
+ z0 = svld4q_vnum (p0, x0, 3))
+
+/*
+** ld4q_vnum_s8_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_4, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, 4),
+ z0 = svld4q_vnum (p0, x0, 4))
+
+/*
+** ld4q_vnum_s8_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_28, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, 28),
+ z0 = svld4q_vnum (p0, x0, 28))
+
+/*
+** ld4q_vnum_s8_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_32, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, 32),
+ z0 = svld4q_vnum (p0, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s8_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_m1, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, -1),
+ z0 = svld4q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s8_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_m2, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, -2),
+ z0 = svld4q_vnum (p0, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_s8_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_m3, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, -3),
+ z0 = svld4q_vnum (p0, x0, -3))
+
+/*
+** ld4q_vnum_s8_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_m4, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, -4),
+ z0 = svld4q_vnum (p0, x0, -4))
+
+/*
+** ld4q_vnum_s8_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_m32, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, -32),
+ z0 = svld4q_vnum (p0, x0, -32))
+
+/*
+** ld4q_vnum_s8_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_m36, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, -36),
+ z0 = svld4q_vnum (p0, x0, -36))
+
+/*
+** ld4q_vnum_s8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld4q_vnum_s8_x1, svint8x4_t, int8_t,
+ z0 = svld4q_vnum_s8 (p0, x0, x1),
+ z0 = svld4q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u16.c
new file mode 100644
index 0000000..c3d5e5e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u16.c
@@ -0,0 +1,325 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld4q_u16_base:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_base, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0),
+ z0 = svld4q (p0, x0))
+
+/*
+** ld4q_u16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_index, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 + x1),
+ z0 = svld4q (p0, x0 + x1))
+
+/*
+** ld4q_u16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_index2, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 + x1 * 2),
+ z0 = svld4q (p0, x0 + x1 * 2))
+
+/*
+** ld4q_u16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_index4, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 + x1 * 4),
+ z0 = svld4q (p0, x0 + x1 * 4))
+
+/*
+** ld4q_u16_index8:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_index8, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 + x1 * 8),
+ z0 = svld4q (p0, x0 + x1 * 8))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u16_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_1, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 + svcnth ()),
+ z0 = svld4q (p0, x0 + svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u16_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_2, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 + svcnth () * 2),
+ z0 = svld4q (p0, x0 + svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u16_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_3, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 + svcnth () * 3),
+ z0 = svld4q (p0, x0 + svcnth () * 3))
+
+/*
+** ld4q_u16_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_4, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 + svcnth () * 4),
+ z0 = svld4q (p0, x0 + svcnth () * 4))
+
+/*
+** ld4q_u16_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_28, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 + svcnth () * 28),
+ z0 = svld4q (p0, x0 + svcnth () * 28))
+
+/*
+** ld4q_u16_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_32, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 + svcnth () * 32),
+ z0 = svld4q (p0, x0 + svcnth () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u16_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_m1, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 - svcnth ()),
+ z0 = svld4q (p0, x0 - svcnth ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u16_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_m2, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 - svcnth () * 2),
+ z0 = svld4q (p0, x0 - svcnth () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u16_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_m3, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 - svcnth () * 3),
+ z0 = svld4q (p0, x0 - svcnth () * 3))
+
+/*
+** ld4q_u16_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_m4, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 - svcnth () * 4),
+ z0 = svld4q (p0, x0 - svcnth () * 4))
+
+/*
+** ld4q_u16_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_m32, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 - svcnth () * 32),
+ z0 = svld4q (p0, x0 - svcnth () * 32))
+
+/*
+** ld4q_u16_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_u16_m36, svuint16x4_t, uint16_t,
+ z0 = svld4q_u16 (p0, x0 - svcnth () * 36),
+ z0 = svld4q (p0, x0 - svcnth () * 36))
+
+/*
+** ld4q_vnum_u16_0:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_0, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, 0),
+ z0 = svld4q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u16_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_1, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, 1),
+ z0 = svld4q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u16_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_2, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, 2),
+ z0 = svld4q_vnum (p0, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u16_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_3, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, 3),
+ z0 = svld4q_vnum (p0, x0, 3))
+
+/*
+** ld4q_vnum_u16_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_4, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, 4),
+ z0 = svld4q_vnum (p0, x0, 4))
+
+/*
+** ld4q_vnum_u16_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_28, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, 28),
+ z0 = svld4q_vnum (p0, x0, 28))
+
+/*
+** ld4q_vnum_u16_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_32, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, 32),
+ z0 = svld4q_vnum (p0, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u16_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_m1, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, -1),
+ z0 = svld4q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u16_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_m2, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, -2),
+ z0 = svld4q_vnum (p0, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u16_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_m3, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, -3),
+ z0 = svld4q_vnum (p0, x0, -3))
+
+/*
+** ld4q_vnum_u16_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_m4, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, -4),
+ z0 = svld4q_vnum (p0, x0, -4))
+
+/*
+** ld4q_vnum_u16_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_m32, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, -32),
+ z0 = svld4q_vnum (p0, x0, -32))
+
+/*
+** ld4q_vnum_u16_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_m36, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, -36),
+ z0 = svld4q_vnum (p0, x0, -36))
+
+/*
+** ld4q_vnum_u16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u16_x1, svuint16x4_t, uint16_t,
+ z0 = svld4q_vnum_u16 (p0, x0, x1),
+ z0 = svld4q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u32.c
new file mode 100644
index 0000000..e39989c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u32.c
@@ -0,0 +1,315 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld4q_u32_base:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_base, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0),
+ z0 = svld4q (p0, x0))
+
+/*
+** ld4q_u32_index:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_index, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 + x1),
+ z0 = svld4q (p0, x0 + x1))
+
+/*
+** ld4q_u32_index2:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_index2, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 + x1 * 2),
+ z0 = svld4q (p0, x0 + x1 * 2))
+
+/*
+** ld4q_u32_index4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_index4, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 + x1 * 4),
+ z0 = svld4q (p0, x0 + x1 * 4))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u32_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_1, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 + svcntw ()),
+ z0 = svld4q (p0, x0 + svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u32_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_2, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 + svcntw () * 2),
+ z0 = svld4q (p0, x0 + svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u32_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_3, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 + svcntw () * 3),
+ z0 = svld4q (p0, x0 + svcntw () * 3))
+
+/*
+** ld4q_u32_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_4, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 + svcntw () * 4),
+ z0 = svld4q (p0, x0 + svcntw () * 4))
+
+/*
+** ld4q_u32_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_28, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 + svcntw () * 28),
+ z0 = svld4q (p0, x0 + svcntw () * 28))
+
+/*
+** ld4q_u32_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_32, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 + svcntw () * 32),
+ z0 = svld4q (p0, x0 + svcntw () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u32_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_m1, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 - svcntw ()),
+ z0 = svld4q (p0, x0 - svcntw ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u32_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_m2, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 - svcntw () * 2),
+ z0 = svld4q (p0, x0 - svcntw () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u32_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_m3, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 - svcntw () * 3),
+ z0 = svld4q (p0, x0 - svcntw () * 3))
+
+/*
+** ld4q_u32_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_m4, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 - svcntw () * 4),
+ z0 = svld4q (p0, x0 - svcntw () * 4))
+
+/*
+** ld4q_u32_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_m32, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 - svcntw () * 32),
+ z0 = svld4q (p0, x0 - svcntw () * 32))
+
+/*
+** ld4q_u32_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_u32_m36, svuint32x4_t, uint32_t,
+ z0 = svld4q_u32 (p0, x0 - svcntw () * 36),
+ z0 = svld4q (p0, x0 - svcntw () * 36))
+
+/*
+** ld4q_vnum_u32_0:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_0, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, 0),
+ z0 = svld4q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u32_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_1, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, 1),
+ z0 = svld4q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u32_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_2, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, 2),
+ z0 = svld4q_vnum (p0, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u32_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_3, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, 3),
+ z0 = svld4q_vnum (p0, x0, 3))
+
+/*
+** ld4q_vnum_u32_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_4, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, 4),
+ z0 = svld4q_vnum (p0, x0, 4))
+
+/*
+** ld4q_vnum_u32_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_28, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, 28),
+ z0 = svld4q_vnum (p0, x0, 28))
+
+/*
+** ld4q_vnum_u32_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_32, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, 32),
+ z0 = svld4q_vnum (p0, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u32_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_m1, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, -1),
+ z0 = svld4q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u32_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_m2, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, -2),
+ z0 = svld4q_vnum (p0, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u32_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_m3, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, -3),
+ z0 = svld4q_vnum (p0, x0, -3))
+
+/*
+** ld4q_vnum_u32_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_m4, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, -4),
+ z0 = svld4q_vnum (p0, x0, -4))
+
+/*
+** ld4q_vnum_u32_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_m32, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, -32),
+ z0 = svld4q_vnum (p0, x0, -32))
+
+/*
+** ld4q_vnum_u32_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_m36, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, -36),
+ z0 = svld4q_vnum (p0, x0, -36))
+
+/*
+** ld4q_vnum_u32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u32_x1, svuint32x4_t, uint32_t,
+ z0 = svld4q_vnum_u32 (p0, x0, x1),
+ z0 = svld4q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u64.c
new file mode 100644
index 0000000..30b05d7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u64.c
@@ -0,0 +1,305 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld4q_u64_base:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_base, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0),
+ z0 = svld4q (p0, x0))
+
+/*
+** ld4q_u64_index:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_index, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 + x1),
+ z0 = svld4q (p0, x0 + x1))
+
+/*
+** ld4q_u64_index2:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_index2, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 + x1 * 2),
+ z0 = svld4q (p0, x0 + x1 * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u64_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_1, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 + svcntd ()),
+ z0 = svld4q (p0, x0 + svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u64_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_2, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 + svcntd () * 2),
+ z0 = svld4q (p0, x0 + svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u64_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_3, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 + svcntd () * 3),
+ z0 = svld4q (p0, x0 + svcntd () * 3))
+
+/*
+** ld4q_u64_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_4, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 + svcntd () * 4),
+ z0 = svld4q (p0, x0 + svcntd () * 4))
+
+/*
+** ld4q_u64_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_28, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 + svcntd () * 28),
+ z0 = svld4q (p0, x0 + svcntd () * 28))
+
+/*
+** ld4q_u64_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_32, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 + svcntd () * 32),
+ z0 = svld4q (p0, x0 + svcntd () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u64_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_m1, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 - svcntd ()),
+ z0 = svld4q (p0, x0 - svcntd ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u64_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_m2, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 - svcntd () * 2),
+ z0 = svld4q (p0, x0 - svcntd () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u64_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_m3, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 - svcntd () * 3),
+ z0 = svld4q (p0, x0 - svcntd () * 3))
+
+/*
+** ld4q_u64_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_m4, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 - svcntd () * 4),
+ z0 = svld4q (p0, x0 - svcntd () * 4))
+
+/*
+** ld4q_u64_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_m32, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 - svcntd () * 32),
+ z0 = svld4q (p0, x0 - svcntd () * 32))
+
+/*
+** ld4q_u64_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_u64_m36, svuint64x4_t, uint64_t,
+ z0 = svld4q_u64 (p0, x0 - svcntd () * 36),
+ z0 = svld4q (p0, x0 - svcntd () * 36))
+
+/*
+** ld4q_vnum_u64_0:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_0, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, 0),
+ z0 = svld4q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u64_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_1, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, 1),
+ z0 = svld4q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u64_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_2, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, 2),
+ z0 = svld4q_vnum (p0, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u64_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_3, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, 3),
+ z0 = svld4q_vnum (p0, x0, 3))
+
+/*
+** ld4q_vnum_u64_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_4, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, 4),
+ z0 = svld4q_vnum (p0, x0, 4))
+
+/*
+** ld4q_vnum_u64_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_28, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, 28),
+ z0 = svld4q_vnum (p0, x0, 28))
+
+/*
+** ld4q_vnum_u64_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_32, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, 32),
+ z0 = svld4q_vnum (p0, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u64_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_m1, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, -1),
+ z0 = svld4q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u64_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_m2, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, -2),
+ z0 = svld4q_vnum (p0, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u64_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_m3, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, -3),
+ z0 = svld4q_vnum (p0, x0, -3))
+
+/*
+** ld4q_vnum_u64_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_m4, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, -4),
+ z0 = svld4q_vnum (p0, x0, -4))
+
+/*
+** ld4q_vnum_u64_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_m32, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, -32),
+ z0 = svld4q_vnum (p0, x0, -32))
+
+/*
+** ld4q_vnum_u64_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_m36, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, -36),
+ z0 = svld4q_vnum (p0, x0, -36))
+
+/*
+** ld4q_vnum_u64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u64_x1, svuint64x4_t, uint64_t,
+ z0 = svld4q_vnum_u64 (p0, x0, x1),
+ z0 = svld4q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u8.c
new file mode 100644
index 0000000..f8e41bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/ld4q_u8.c
@@ -0,0 +1,335 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** ld4q_u8_base:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_base, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0),
+ z0 = svld4q (p0, x0))
+
+/*
+** ld4q_u8_index:
+** add (x[0-9]), (?:x0, x1|x1, x0)
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_index, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 + x1),
+ z0 = svld4q (p0, x0 + x1))
+
+/*
+** ld4q_u8_index2:
+** add (x[0-9]), x0, x1, lsl #?1
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_index2, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 + x1 * 2),
+ z0 = svld4q (p0, x0 + x1 * 2))
+
+/*
+** ld4q_u8_index4:
+** add (x[0-9]), x0, x1, lsl #?2
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_index4, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 + x1 * 4),
+ z0 = svld4q (p0, x0 + x1 * 4))
+
+/*
+** ld4q_u8_index8:
+** add (x[0-9]), x0, x1, lsl #?3
+** ld4q {z0\.q - z3\.q}, p0/z, \[\1\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_index8, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 + x1 * 8),
+ z0 = svld4q (p0, x0 + x1 * 8))
+
+/*
+** ld4q_u8_index16:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_index16, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 + x1 * 16),
+ z0 = svld4q (p0, x0 + x1 * 16))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u8_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_1, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 + svcntb ()),
+ z0 = svld4q (p0, x0 + svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u8_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_2, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 + svcntb () * 2),
+ z0 = svld4q (p0, x0 + svcntb () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u8_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_3, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 + svcntb () * 3),
+ z0 = svld4q (p0, x0 + svcntb () * 3))
+
+/*
+** ld4q_u8_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_4, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 + svcntb () * 4),
+ z0 = svld4q (p0, x0 + svcntb () * 4))
+
+/*
+** ld4q_u8_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_28, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 + svcntb () * 28),
+ z0 = svld4q (p0, x0 + svcntb () * 28))
+
+/*
+** ld4q_u8_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_32, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 + svcntb () * 32),
+ z0 = svld4q (p0, x0 + svcntb () * 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u8_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_m1, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 - svcntb ()),
+ z0 = svld4q (p0, x0 - svcntb ()))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u8_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_m2, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 - svcntb () * 2),
+ z0 = svld4q (p0, x0 - svcntb () * 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_u8_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_m3, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 - svcntb () * 3),
+ z0 = svld4q (p0, x0 - svcntb () * 3))
+
+/*
+** ld4q_u8_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_m4, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 - svcntb () * 4),
+ z0 = svld4q (p0, x0 - svcntb () * 4))
+
+/*
+** ld4q_u8_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_m32, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 - svcntb () * 32),
+ z0 = svld4q (p0, x0 - svcntb () * 32))
+
+/*
+** ld4q_u8_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_u8_m36, svuint8x4_t, uint8_t,
+ z0 = svld4q_u8 (p0, x0 - svcntb () * 36),
+ z0 = svld4q (p0, x0 - svcntb () * 36))
+
+/*
+** ld4q_vnum_u8_0:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_0, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, 0),
+ z0 = svld4q_vnum (p0, x0, 0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u8_1:
+** incb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_1, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, 1),
+ z0 = svld4q_vnum (p0, x0, 1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u8_2:
+** incb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_2, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, 2),
+ z0 = svld4q_vnum (p0, x0, 2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u8_3:
+** incb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_3, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, 3),
+ z0 = svld4q_vnum (p0, x0, 3))
+
+/*
+** ld4q_vnum_u8_4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_4, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, 4),
+ z0 = svld4q_vnum (p0, x0, 4))
+
+/*
+** ld4q_vnum_u8_28:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_28, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, 28),
+ z0 = svld4q_vnum (p0, x0, 28))
+
+/*
+** ld4q_vnum_u8_32:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_32, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, 32),
+ z0 = svld4q_vnum (p0, x0, 32))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u8_m1:
+** decb x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_m1, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, -1),
+ z0 = svld4q_vnum (p0, x0, -1))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u8_m2:
+** decb x0, all, mul #2
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_m2, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, -2),
+ z0 = svld4q_vnum (p0, x0, -2))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** ld4q_vnum_u8_m3:
+** decb x0, all, mul #3
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_m3, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, -3),
+ z0 = svld4q_vnum (p0, x0, -3))
+
+/*
+** ld4q_vnum_u8_m4:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_m4, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, -4),
+ z0 = svld4q_vnum (p0, x0, -4))
+
+/*
+** ld4q_vnum_u8_m32:
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_m32, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, -32),
+ z0 = svld4q_vnum (p0, x0, -32))
+
+/*
+** ld4q_vnum_u8_m36:
+** [^{]*
+** ld4q {z0\.q - z3\.q}, p0/z, \[x[0-9]+\]
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_m36, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, -36),
+ z0 = svld4q_vnum (p0, x0, -36))
+
+/*
+** ld4q_vnum_u8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** ld4q {z0\.q - z3\.q}, p0/z, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** ld4q {z0\.q - z3\.q}, p0/z, \[x0, \3\]
+** )
+** ret
+*/
+TEST_LOAD (ld4q_vnum_u8_x1, svuint8x4_t, uint8_t,
+ z0 = svld4q_vnum_u8 (p0, x0, x1),
+ z0 = svld4q_vnum (p0, x0, x1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxnmqv_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxnmqv_f16.c
new file mode 100644
index 0000000..c17dfc9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxnmqv_f16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxnmqv_d0_f16_tied:
+** fmaxnmqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (maxnmqv_d0_f16_tied, float16x8_t, svfloat16_t,
+ d0 = svmaxnmqv_f16 (p0, z0),
+ d0 = svmaxnmqv (p0, z0))
+
+/*
+** maxnmqv_d0_f16_untied:
+** fmaxnmqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (maxnmqv_d0_f16_untied, float16x8_t, svfloat16_t,
+ d0 = svmaxnmqv_f16 (p0, z1),
+ d0 = svmaxnmqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxnmqv_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxnmqv_f32.c
new file mode 100644
index 0000000..ba5bf78
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxnmqv_f32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxnmqv_d0_f32_tied:
+** fmaxnmqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (maxnmqv_d0_f32_tied, float32x4_t, svfloat32_t,
+ d0 = svmaxnmqv_f32 (p0, z0),
+ d0 = svmaxnmqv (p0, z0))
+
+/*
+** maxnmqv_d0_f32_untied:
+** fmaxnmqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (maxnmqv_d0_f32_untied, float32x4_t, svfloat32_t,
+ d0 = svmaxnmqv_f32 (p0, z1),
+ d0 = svmaxnmqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxnmqv_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxnmqv_f64.c
new file mode 100644
index 0000000..f768bf4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxnmqv_f64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxnmqv_d0_f64_tied:
+** fmaxnmqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (maxnmqv_d0_f64_tied, float64x2_t, svfloat64_t,
+ d0 = svmaxnmqv_f64 (p0, z0),
+ d0 = svmaxnmqv (p0, z0))
+
+/*
+** maxnmqv_d0_f64_untied:
+** fmaxnmqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (maxnmqv_d0_f64_untied, float64x2_t, svfloat64_t,
+ d0 = svmaxnmqv_f64 (p0, z1),
+ d0 = svmaxnmqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_f16.c
new file mode 100644
index 0000000..8c2cc92
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_f16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxqv_d0_f16_tied:
+** fmaxqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_f16_tied, float16x8_t, svfloat16_t,
+ d0 = svmaxqv_f16 (p0, z0),
+ d0 = svmaxqv (p0, z0))
+
+/*
+** maxqv_d0_f16_untied:
+** fmaxqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_f16_untied, float16x8_t, svfloat16_t,
+ d0 = svmaxqv_f16 (p0, z1),
+ d0 = svmaxqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_f32.c
new file mode 100644
index 0000000..0e7cbb7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_f32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxqv_d0_f32_tied:
+** fmaxqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_f32_tied, float32x4_t, svfloat32_t,
+ d0 = svmaxqv_f32 (p0, z0),
+ d0 = svmaxqv (p0, z0))
+
+/*
+** maxqv_d0_f32_untied:
+** fmaxqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_f32_untied, float32x4_t, svfloat32_t,
+ d0 = svmaxqv_f32 (p0, z1),
+ d0 = svmaxqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_f64.c
new file mode 100644
index 0000000..b846f4c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_f64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxqv_d0_f64_tied:
+** fmaxqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_f64_tied, float64x2_t, svfloat64_t,
+ d0 = svmaxqv_f64 (p0, z0),
+ d0 = svmaxqv (p0, z0))
+
+/*
+** maxqv_d0_f64_untied:
+** fmaxqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_f64_untied, float64x2_t, svfloat64_t,
+ d0 = svmaxqv_f64 (p0, z1),
+ d0 = svmaxqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s16.c
new file mode 100644
index 0000000..065d872
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxqv_d0_s16_tied:
+** smaxqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_s16_tied, int16x8_t, svint16_t,
+ d0 = svmaxqv_s16 (p0, z0),
+ d0 = svmaxqv (p0, z0))
+
+/*
+** maxqv_d0_s16_untied:
+** smaxqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_s16_untied, int16x8_t, svint16_t,
+ d0 = svmaxqv_s16 (p0, z1),
+ d0 = svmaxqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s32.c
new file mode 100644
index 0000000..f000ffa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxqv_d0_s32_tied:
+** smaxqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_s32_tied, int32x4_t, svint32_t,
+ d0 = svmaxqv_s32 (p0, z0),
+ d0 = svmaxqv (p0, z0))
+
+/*
+** maxqv_d0_s32_untied:
+** smaxqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_s32_untied, int32x4_t, svint32_t,
+ d0 = svmaxqv_s32 (p0, z1),
+ d0 = svmaxqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s64.c
new file mode 100644
index 0000000..9910b34
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxqv_d0_s64_tied:
+** smaxqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_s64_tied, int64x2_t, svint64_t,
+ d0 = svmaxqv_s64 (p0, z0),
+ d0 = svmaxqv (p0, z0))
+
+/*
+** maxqv_d0_s64_untied:
+** smaxqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_s64_untied, int64x2_t, svint64_t,
+ d0 = svmaxqv_s64 (p0, z1),
+ d0 = svmaxqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s8.c
new file mode 100644
index 0000000..8c8e250
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_s8.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxqv_d0_s8_tied:
+** smaxqv v0\.16b, p0, z0\.b
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_s8_tied, int8x16_t, svint8_t,
+ d0 = svmaxqv_s8 (p0, z0),
+ d0 = svmaxqv (p0, z0))
+
+/*
+** maxqv_d0_s8_untied:
+** smaxqv v0\.16b, p0, z1\.b
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_s8_untied, int8x16_t, svint8_t,
+ d0 = svmaxqv_s8 (p0, z1),
+ d0 = svmaxqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u16.c
new file mode 100644
index 0000000..c458d10
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxqv_d0_u16_tied:
+** umaxqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_u16_tied, uint16x8_t, svuint16_t,
+ d0 = svmaxqv_u16 (p0, z0),
+ d0 = svmaxqv (p0, z0))
+
+/*
+** maxqv_d0_u16_untied:
+** umaxqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_u16_untied, uint16x8_t, svuint16_t,
+ d0 = svmaxqv_u16 (p0, z1),
+ d0 = svmaxqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u32.c
new file mode 100644
index 0000000..0ef8401
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxqv_d0_u32_tied:
+** umaxqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_u32_tied, uint32x4_t, svuint32_t,
+ d0 = svmaxqv_u32 (p0, z0),
+ d0 = svmaxqv (p0, z0))
+
+/*
+** maxqv_d0_u32_untied:
+** umaxqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_u32_untied, uint32x4_t, svuint32_t,
+ d0 = svmaxqv_u32 (p0, z1),
+ d0 = svmaxqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u64.c
new file mode 100644
index 0000000..6a28fe7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxqv_d0_u64_tied:
+** umaxqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_u64_tied, uint64x2_t, svuint64_t,
+ d0 = svmaxqv_u64 (p0, z0),
+ d0 = svmaxqv (p0, z0))
+
+/*
+** maxqv_d0_u64_untied:
+** umaxqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_u64_untied, uint64x2_t, svuint64_t,
+ d0 = svmaxqv_u64 (p0, z1),
+ d0 = svmaxqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u8.c
new file mode 100644
index 0000000..f4f0862
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/maxqv_u8.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** maxqv_d0_u8_tied:
+** umaxqv v0\.16b, p0, z0\.b
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_u8_tied, uint8x16_t, svuint8_t,
+ d0 = svmaxqv_u8 (p0, z0),
+ d0 = svmaxqv (p0, z0))
+
+/*
+** maxqv_d0_u8_untied:
+** umaxqv v0\.16b, p0, z1\.b
+** ret
+*/
+TEST_REDUCTION_D (maxqv_d0_u8_untied, uint8x16_t, svuint8_t,
+ d0 = svmaxqv_u8 (p0, z1),
+ d0 = svmaxqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minnmqv_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minnmqv_f16.c
new file mode 100644
index 0000000..9339d9d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minnmqv_f16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minnmqv_d0_f16_tied:
+** fminnmqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (minnmqv_d0_f16_tied, float16x8_t, svfloat16_t,
+ d0 = svminnmqv_f16 (p0, z0),
+ d0 = svminnmqv (p0, z0))
+
+/*
+** minnmqv_d0_f16_untied:
+** fminnmqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (minnmqv_d0_f16_untied, float16x8_t, svfloat16_t,
+ d0 = svminnmqv_f16 (p0, z1),
+ d0 = svminnmqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minnmqv_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minnmqv_f32.c
new file mode 100644
index 0000000..d508ee8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minnmqv_f32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minnmqv_d0_f32_tied:
+** fminnmqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (minnmqv_d0_f32_tied, float32x4_t, svfloat32_t,
+ d0 = svminnmqv_f32 (p0, z0),
+ d0 = svminnmqv (p0, z0))
+
+/*
+** minnmqv_d0_f32_untied:
+** fminnmqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (minnmqv_d0_f32_untied, float32x4_t, svfloat32_t,
+ d0 = svminnmqv_f32 (p0, z1),
+ d0 = svminnmqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minnmqv_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minnmqv_f64.c
new file mode 100644
index 0000000..9223970
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minnmqv_f64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minnmqv_d0_f64_tied:
+** fminnmqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (minnmqv_d0_f64_tied, float64x2_t, svfloat64_t,
+ d0 = svminnmqv_f64 (p0, z0),
+ d0 = svminnmqv (p0, z0))
+
+/*
+** minnmqv_d0_f64_untied:
+** fminnmqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (minnmqv_d0_f64_untied, float64x2_t, svfloat64_t,
+ d0 = svminnmqv_f64 (p0, z1),
+ d0 = svminnmqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_f16.c
new file mode 100644
index 0000000..7fb3960
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_f16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minqv_d0_f16_tied:
+** fminqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_f16_tied, float16x8_t, svfloat16_t,
+ d0 = svminqv_f16 (p0, z0),
+ d0 = svminqv (p0, z0))
+
+/*
+** minqv_d0_f16_untied:
+** fminqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_f16_untied, float16x8_t, svfloat16_t,
+ d0 = svminqv_f16 (p0, z1),
+ d0 = svminqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_f32.c
new file mode 100644
index 0000000..a9dae50
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_f32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minqv_d0_f32_tied:
+** fminqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_f32_tied, float32x4_t, svfloat32_t,
+ d0 = svminqv_f32 (p0, z0),
+ d0 = svminqv (p0, z0))
+
+/*
+** minqv_d0_f32_untied:
+** fminqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_f32_untied, float32x4_t, svfloat32_t,
+ d0 = svminqv_f32 (p0, z1),
+ d0 = svminqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_f64.c
new file mode 100644
index 0000000..a125493
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_f64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minqv_d0_f64_tied:
+** fminqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_f64_tied, float64x2_t, svfloat64_t,
+ d0 = svminqv_f64 (p0, z0),
+ d0 = svminqv (p0, z0))
+
+/*
+** minqv_d0_f64_untied:
+** fminqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_f64_untied, float64x2_t, svfloat64_t,
+ d0 = svminqv_f64 (p0, z1),
+ d0 = svminqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s16.c
new file mode 100644
index 0000000..326eef2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minqv_d0_s16_tied:
+** sminqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_s16_tied, int16x8_t, svint16_t,
+ d0 = svminqv_s16 (p0, z0),
+ d0 = svminqv (p0, z0))
+
+/*
+** minqv_d0_s16_untied:
+** sminqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_s16_untied, int16x8_t, svint16_t,
+ d0 = svminqv_s16 (p0, z1),
+ d0 = svminqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s32.c
new file mode 100644
index 0000000..972d678
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minqv_d0_s32_tied:
+** sminqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_s32_tied, int32x4_t, svint32_t,
+ d0 = svminqv_s32 (p0, z0),
+ d0 = svminqv (p0, z0))
+
+/*
+** minqv_d0_s32_untied:
+** sminqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_s32_untied, int32x4_t, svint32_t,
+ d0 = svminqv_s32 (p0, z1),
+ d0 = svminqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s64.c
new file mode 100644
index 0000000..523dbf7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minqv_d0_s64_tied:
+** sminqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_s64_tied, int64x2_t, svint64_t,
+ d0 = svminqv_s64 (p0, z0),
+ d0 = svminqv (p0, z0))
+
+/*
+** minqv_d0_s64_untied:
+** sminqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_s64_untied, int64x2_t, svint64_t,
+ d0 = svminqv_s64 (p0, z1),
+ d0 = svminqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s8.c
new file mode 100644
index 0000000..d3f8622
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_s8.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minqv_d0_s8_tied:
+** sminqv v0\.16b, p0, z0\.b
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_s8_tied, int8x16_t, svint8_t,
+ d0 = svminqv_s8 (p0, z0),
+ d0 = svminqv (p0, z0))
+
+/*
+** minqv_d0_s8_untied:
+** sminqv v0\.16b, p0, z1\.b
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_s8_untied, int8x16_t, svint8_t,
+ d0 = svminqv_s8 (p0, z1),
+ d0 = svminqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u16.c
new file mode 100644
index 0000000..7865095
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minqv_d0_u16_tied:
+** uminqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_u16_tied, uint16x8_t, svuint16_t,
+ d0 = svminqv_u16 (p0, z0),
+ d0 = svminqv (p0, z0))
+
+/*
+** minqv_d0_u16_untied:
+** uminqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_u16_untied, uint16x8_t, svuint16_t,
+ d0 = svminqv_u16 (p0, z1),
+ d0 = svminqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u32.c
new file mode 100644
index 0000000..b323ca3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minqv_d0_u32_tied:
+** uminqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_u32_tied, uint32x4_t, svuint32_t,
+ d0 = svminqv_u32 (p0, z0),
+ d0 = svminqv (p0, z0))
+
+/*
+** minqv_d0_u32_untied:
+** uminqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_u32_untied, uint32x4_t, svuint32_t,
+ d0 = svminqv_u32 (p0, z1),
+ d0 = svminqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u64.c
new file mode 100644
index 0000000..fb826b0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minqv_d0_u64_tied:
+** uminqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_u64_tied, uint64x2_t, svuint64_t,
+ d0 = svminqv_u64 (p0, z0),
+ d0 = svminqv (p0, z0))
+
+/*
+** minqv_d0_u64_untied:
+** uminqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_u64_untied, uint64x2_t, svuint64_t,
+ d0 = svminqv_u64 (p0, z1),
+ d0 = svminqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u8.c
new file mode 100644
index 0000000..7fb0425
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/minqv_u8.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** minqv_d0_u8_tied:
+** uminqv v0\.16b, p0, z0\.b
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_u8_tied, uint8x16_t, svuint8_t,
+ d0 = svminqv_u8 (p0, z0),
+ d0 = svminqv (p0, z0))
+
+/*
+** minqv_d0_u8_untied:
+** uminqv v0\.16b, p0, z1\.b
+** ret
+*/
+TEST_REDUCTION_D (minqv_d0_u8_untied, uint8x16_t, svuint8_t,
+ d0 = svminqv_u8 (p0, z1),
+ d0 = svminqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s16.c
new file mode 100644
index 0000000..16305ff
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** orqv_d0_s16_tied:
+** orqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_s16_tied, int16x8_t, svint16_t,
+ d0 = svorqv_s16 (p0, z0),
+ d0 = svorqv (p0, z0))
+
+/*
+** orqv_d0_s16_untied:
+** orqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_s16_untied, int16x8_t, svint16_t,
+ d0 = svorqv_s16 (p0, z1),
+ d0 = svorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s32.c
new file mode 100644
index 0000000..1312e2e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** orqv_d0_s32_tied:
+** orqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_s32_tied, int32x4_t, svint32_t,
+ d0 = svorqv_s32 (p0, z0),
+ d0 = svorqv (p0, z0))
+
+/*
+** orqv_d0_s32_untied:
+** orqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_s32_untied, int32x4_t, svint32_t,
+ d0 = svorqv_s32 (p0, z1),
+ d0 = svorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s64.c
new file mode 100644
index 0000000..73fb886
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** orqv_d0_s64_tied:
+** orqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_s64_tied, int64x2_t, svint64_t,
+ d0 = svorqv_s64 (p0, z0),
+ d0 = svorqv (p0, z0))
+
+/*
+** orqv_d0_s64_untied:
+** orqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_s64_untied, int64x2_t, svint64_t,
+ d0 = svorqv_s64 (p0, z1),
+ d0 = svorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s8.c
new file mode 100644
index 0000000..93afb23
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_s8.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** orqv_d0_s8_tied:
+** orqv v0\.16b, p0, z0\.b
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_s8_tied, int8x16_t, svint8_t,
+ d0 = svorqv_s8 (p0, z0),
+ d0 = svorqv (p0, z0))
+
+/*
+** orqv_d0_s8_untied:
+** orqv v0\.16b, p0, z1\.b
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_s8_untied, int8x16_t, svint8_t,
+ d0 = svorqv_s8 (p0, z1),
+ d0 = svorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u16.c
new file mode 100644
index 0000000..eee0d7c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u16.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** orqv_d0_u16_tied:
+** orqv v0\.8h, p0, z0\.h
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_u16_tied, uint16x8_t, svuint16_t,
+ d0 = svorqv_u16 (p0, z0),
+ d0 = svorqv (p0, z0))
+
+/*
+** orqv_d0_u16_untied:
+** orqv v0\.8h, p0, z1\.h
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_u16_untied, uint16x8_t, svuint16_t,
+ d0 = svorqv_u16 (p0, z1),
+ d0 = svorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u32.c
new file mode 100644
index 0000000..f619b0d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u32.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** orqv_d0_u32_tied:
+** orqv v0\.4s, p0, z0\.s
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_u32_tied, uint32x4_t, svuint32_t,
+ d0 = svorqv_u32 (p0, z0),
+ d0 = svorqv (p0, z0))
+
+/*
+** orqv_d0_u32_untied:
+** orqv v0\.4s, p0, z1\.s
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_u32_untied, uint32x4_t, svuint32_t,
+ d0 = svorqv_u32 (p0, z1),
+ d0 = svorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u64.c
new file mode 100644
index 0000000..8209540
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u64.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** orqv_d0_u64_tied:
+** orqv v0\.2d, p0, z0\.d
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_u64_tied, uint64x2_t, svuint64_t,
+ d0 = svorqv_u64 (p0, z0),
+ d0 = svorqv (p0, z0))
+
+/*
+** orqv_d0_u64_untied:
+** orqv v0\.2d, p0, z1\.d
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_u64_untied, uint64x2_t, svuint64_t,
+ d0 = svorqv_u64 (p0, z1),
+ d0 = svorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u8.c
new file mode 100644
index 0000000..82eb08b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/orqv_u8.c
@@ -0,0 +1,26 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** orqv_d0_u8_tied:
+** orqv v0\.16b, p0, z0\.b
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_u8_tied, uint8x16_t, svuint8_t,
+ d0 = svorqv_u8 (p0, z0),
+ d0 = svorqv (p0, z0))
+
+/*
+** orqv_d0_u8_untied:
+** orqv v0\.16b, p0, z1\.b
+** ret
+*/
+TEST_REDUCTION_D (orqv_d0_u8_untied, uint8x16_t, svuint8_t,
+ d0 = svorqv_u8 (p0, z1),
+ d0 = svorqv (p0, z1))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s16.c
new file mode 100644
index 0000000..9bcebb6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s16.c
@@ -0,0 +1,68 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** pmov_to_vector:
+** pmov z0, p0\.h
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector, svint16_t,
+ z0 = svpmov_s16_z (p0),
+ z0 = svpmov_s16_z (p0));
+
+/*
+** pmov_to_vector_1_tied:
+** pmov z0\[1\], p0\.h
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_1_tied, svint16_t,
+ z0 = svpmov_lane_s16_m (z0, p0, 1),
+ z0 = svpmov_lane_m (z0, p0, 1));
+
+/*
+** pmov_to_vector_1_untied:
+** (
+** mov z0\.d, z1\.d
+** pmov z0\[1\], p0\.h
+** |
+** pmov z1\[1\], p0\.h
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_1_untied, svint16_t,
+ z0 = svpmov_lane_s16_m (z1, p0, 1),
+ z0 = svpmov_lane_m (z1, p0, 1));
+
+/*
+** pmov_from_vector:
+** pmov p0\.h, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector, svint16_t,
+ p0 = svpmov_s16 (z0),
+ p0 = svpmov (z0));
+
+/*
+** pmov_from_vector_0:
+** pmov p0\.h, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_0, svint16_t,
+ p0 = svpmov_lane_s16 (z0, 0),
+ p0 = svpmov_lane (z0, 0));
+
+/*
+** pmov_from_vector_1:
+** pmov p0\.h, z0\[1\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_1, svint16_t,
+ p0 = svpmov_lane_s16 (z0, 1),
+ p0 = svpmov_lane (z0, 1));
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s32.c
new file mode 100644
index 0000000..a673905
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s32.c
@@ -0,0 +1,104 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** pmov_to_vector:
+** pmov z0, p0\.s
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector, svint32_t,
+ z0 = svpmov_s32_z (p0),
+ z0 = svpmov_s32_z (p0));
+
+/*
+** pmov_to_vector_1_tied:
+** pmov z0\[1\], p0\.s
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_1_tied, svint32_t,
+ z0 = svpmov_lane_s32_m (z0, p0, 1),
+ z0 = svpmov_lane_m (z0, p0, 1));
+
+/*
+** pmov_to_vector_1_untied:
+** (
+** mov z0\.d, z1\.d
+** pmov z0\[1\], p0\.s
+** |
+** pmov z1\[1\], p0\.s
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_1_untied, svint32_t,
+ z0 = svpmov_lane_s32_m (z1, p0, 1),
+ z0 = svpmov_lane_m (z1, p0, 1));
+
+/*
+** pmov_to_vector_2:
+** pmov z0\[2\], p0\.s
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_2, svint32_t,
+ z0 = svpmov_lane_s32_m (z0, p0, 2),
+ z0 = svpmov_lane_m (z0, p0, 2));
+
+/*
+** pmov_to_vector_3:
+** pmov z0\[3\], p0\.s
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_3, svint32_t,
+ z0 = svpmov_lane_s32_m (z0, p0, 3),
+ z0 = svpmov_lane_m (z0, p0, 3));
+
+/*
+** pmov_from_vector:
+** pmov p0\.s, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector, svint32_t,
+ p0 = svpmov_s32 (z0),
+ p0 = svpmov (z0));
+
+/*
+** pmov_from_vector_0:
+** pmov p0\.s, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_0, svint32_t,
+ p0 = svpmov_lane_s32 (z0, 0),
+ p0 = svpmov_lane (z0, 0));
+
+/*
+** pmov_from_vector_1:
+** pmov p0\.s, z0\[1\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_1, svint32_t,
+ p0 = svpmov_lane_s32 (z0, 1),
+ p0 = svpmov_lane (z0, 1));
+
+/*
+** pmov_from_vector_2:
+** pmov p0\.s, z0\[2\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_2, svint32_t,
+ p0 = svpmov_lane_s32 (z0, 2),
+ p0 = svpmov_lane (z0, 2));
+
+/*
+** pmov_from_vector_3:
+** pmov p0\.s, z0\[3\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_3, svint32_t,
+ p0 = svpmov_lane_s32 (z0, 3),
+ p0 = svpmov_lane (z0, 3));
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s64.c
new file mode 100644
index 0000000..2194c0e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s64.c
@@ -0,0 +1,104 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** pmov_to_vector:
+** pmov z0, p0\.d
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector, svint64_t,
+ z0 = svpmov_s64_z (p0),
+ z0 = svpmov_s64_z (p0));
+
+/*
+** pmov_to_vector_1_tied:
+** pmov z0\[1\], p0\.d
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_1_tied, svint64_t,
+ z0 = svpmov_lane_s64_m (z0, p0, 1),
+ z0 = svpmov_lane_m (z0, p0, 1));
+
+/*
+** pmov_to_vector_4_untied:
+** (
+** mov z0\.d, z1\.d
+** pmov z0\[4\], p0\.d
+** |
+** pmov z1\[4\], p0\.d
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_4_untied, svint64_t,
+ z0 = svpmov_lane_s64_m (z1, p0, 4),
+ z0 = svpmov_lane_m (z1, p0, 4));
+
+/*
+** pmov_to_vector_6:
+** pmov z0\[6\], p0\.d
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_6, svint64_t,
+ z0 = svpmov_lane_s64_m (z0, p0, 6),
+ z0 = svpmov_lane_m (z0, p0, 6));
+
+/*
+** pmov_to_vector_7:
+** pmov z0\[7\], p0\.d
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_7, svint64_t,
+ z0 = svpmov_lane_s64_m (z0, p0, 7),
+ z0 = svpmov_lane_m (z0, p0, 7));
+
+/*
+** pmov_from_vector:
+** pmov p0\.d, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector, svint64_t,
+ p0 = svpmov_s64 (z0),
+ p0 = svpmov (z0));
+
+/*
+** pmov_from_vector_0:
+** pmov p0\.d, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_0, svint64_t,
+ p0 = svpmov_lane_s64 (z0, 0),
+ p0 = svpmov_lane (z0, 0));
+
+/*
+** pmov_from_vector_4:
+** pmov p0\.d, z0\[4\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_4, svint64_t,
+ p0 = svpmov_lane_s64 (z0, 4),
+ p0 = svpmov_lane (z0, 4));
+
+/*
+** pmov_from_vector_5:
+** pmov p0\.d, z0\[5\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_5, svint64_t,
+ p0 = svpmov_lane_s64 (z0, 5),
+ p0 = svpmov_lane (z0, 5));
+
+/*
+** pmov_from_vector_7:
+** pmov p0\.d, z0\[7\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_7, svint64_t,
+ p0 = svpmov_lane_s64 (z0, 7),
+ p0 = svpmov_lane (z0, 7));
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s8.c
new file mode 100644
index 0000000..8dc0886
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_s8.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** pmov_to_vector:
+** pmov z0, p0\.b
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector, svint8_t,
+ z0 = svpmov_s8_z (p0),
+ z0 = svpmov_s8_z (p0));
+
+/*
+** pmov_from_vector:
+** pmov p0\.b, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector, svint8_t,
+ p0 = svpmov_s8 (z0),
+ p0 = svpmov (z0));
+
+/*
+** pmov_from_vector_0:
+** pmov p0\.b, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_0, svint8_t,
+ p0 = svpmov_lane_s8 (z0, 0),
+ p0 = svpmov_lane (z0, 0));
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u16.c
new file mode 100644
index 0000000..a312e16
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u16.c
@@ -0,0 +1,68 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** pmov_to_vector:
+** pmov z0, p0\.h
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector, svuint16_t,
+ z0 = svpmov_u16_z (p0),
+ z0 = svpmov_u16_z (p0));
+
+/*
+** pmov_to_vector_1_tied:
+** pmov z0\[1\], p0\.h
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_1_tied, svuint16_t,
+ z0 = svpmov_lane_u16_m (z0, p0, 1),
+ z0 = svpmov_lane_m (z0, p0, 1));
+
+/*
+** pmov_to_vector_1_untied:
+** (
+** mov z0\.d, z1\.d
+** pmov z0\[1\], p0\.h
+** |
+** pmov z1\[1\], p0\.h
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_1_untied, svuint16_t,
+ z0 = svpmov_lane_u16_m (z1, p0, 1),
+ z0 = svpmov_lane_m (z1, p0, 1));
+
+/*
+** pmov_from_vector:
+** pmov p0\.h, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector, svuint16_t,
+ p0 = svpmov_u16 (z0),
+ p0 = svpmov (z0));
+
+/*
+** pmov_from_vector_0:
+** pmov p0\.h, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_0, svuint16_t,
+ p0 = svpmov_lane_u16 (z0, 0),
+ p0 = svpmov_lane (z0, 0));
+
+/*
+** pmov_from_vector_1:
+** pmov p0\.h, z0\[1\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_1, svuint16_t,
+ p0 = svpmov_lane_u16 (z0, 1),
+ p0 = svpmov_lane (z0, 1));
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u32.c
new file mode 100644
index 0000000..dff79e6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u32.c
@@ -0,0 +1,104 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** pmov_to_vector:
+** pmov z0, p0\.s
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector, svuint32_t,
+ z0 = svpmov_u32_z (p0),
+ z0 = svpmov_u32_z (p0));
+
+/*
+** pmov_to_vector_1_tied:
+** pmov z0\[1\], p0\.s
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_1_tied, svuint32_t,
+ z0 = svpmov_lane_u32_m (z0, p0, 1),
+ z0 = svpmov_lane_m (z0, p0, 1));
+
+/*
+** pmov_to_vector_1_untied:
+** (
+** mov z0\.d, z1\.d
+** pmov z0\[1\], p0\.s
+** |
+** pmov z1\[1\], p0\.s
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_1_untied, svuint32_t,
+ z0 = svpmov_lane_u32_m (z1, p0, 1),
+ z0 = svpmov_lane_m (z1, p0, 1));
+
+/*
+** pmov_to_vector_2:
+** pmov z0\[2\], p0\.s
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_2, svuint32_t,
+ z0 = svpmov_lane_u32_m (z0, p0, 2),
+ z0 = svpmov_lane_m (z0, p0, 2));
+
+/*
+** pmov_to_vector_3:
+** pmov z0\[3\], p0\.s
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_3, svuint32_t,
+ z0 = svpmov_lane_u32_m (z0, p0, 3),
+ z0 = svpmov_lane_m (z0, p0, 3));
+
+/*
+** pmov_from_vector:
+** pmov p0\.s, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector, svuint32_t,
+ p0 = svpmov_u32 (z0),
+ p0 = svpmov (z0));
+
+/*
+** pmov_from_vector_0:
+** pmov p0\.s, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_0, svuint32_t,
+ p0 = svpmov_lane_u32 (z0, 0),
+ p0 = svpmov_lane (z0, 0));
+
+/*
+** pmov_from_vector_1:
+** pmov p0\.s, z0\[1\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_1, svuint32_t,
+ p0 = svpmov_lane_u32 (z0, 1),
+ p0 = svpmov_lane (z0, 1));
+
+/*
+** pmov_from_vector_2:
+** pmov p0\.s, z0\[2\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_2, svuint32_t,
+ p0 = svpmov_lane_u32 (z0, 2),
+ p0 = svpmov_lane (z0, 2));
+
+/*
+** pmov_from_vector_3:
+** pmov p0\.s, z0\[3\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_3, svuint32_t,
+ p0 = svpmov_lane_u32 (z0, 3),
+ p0 = svpmov_lane (z0, 3));
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u64.c
new file mode 100644
index 0000000..2a56525
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u64.c
@@ -0,0 +1,104 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** pmov_to_vector:
+** pmov z0, p0\.d
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector, svuint64_t,
+ z0 = svpmov_u64_z (p0),
+ z0 = svpmov_u64_z (p0));
+
+/*
+** pmov_to_vector_1_tied:
+** pmov z0\[1\], p0\.d
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_1_tied, svuint64_t,
+ z0 = svpmov_lane_u64_m (z0, p0, 1),
+ z0 = svpmov_lane_m (z0, p0, 1));
+
+/*
+** pmov_to_vector_4_untied:
+** (
+** mov z0\.d, z1\.d
+** pmov z0\[4\], p0\.d
+** |
+** pmov z1\[4\], p0\.d
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_4_untied, svuint64_t,
+ z0 = svpmov_lane_u64_m (z1, p0, 4),
+ z0 = svpmov_lane_m (z1, p0, 4));
+
+/*
+** pmov_to_vector_6:
+** pmov z0\[6\], p0\.d
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_6, svuint64_t,
+ z0 = svpmov_lane_u64_m (z0, p0, 6),
+ z0 = svpmov_lane_m (z0, p0, 6));
+
+/*
+** pmov_to_vector_7:
+** pmov z0\[7\], p0\.d
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector_7, svuint64_t,
+ z0 = svpmov_lane_u64_m (z0, p0, 7),
+ z0 = svpmov_lane_m (z0, p0, 7));
+
+/*
+** pmov_from_vector:
+** pmov p0\.d, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector, svuint64_t,
+ p0 = svpmov_u64 (z0),
+ p0 = svpmov (z0));
+
+/*
+** pmov_from_vector_0:
+** pmov p0\.d, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_0, svuint64_t,
+ p0 = svpmov_lane_u64 (z0, 0),
+ p0 = svpmov_lane (z0, 0));
+
+/*
+** pmov_from_vector_4:
+** pmov p0\.d, z0\[4\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_4, svuint64_t,
+ p0 = svpmov_lane_u64 (z0, 4),
+ p0 = svpmov_lane (z0, 4));
+
+/*
+** pmov_from_vector_5:
+** pmov p0\.d, z0\[5\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_5, svuint64_t,
+ p0 = svpmov_lane_u64 (z0, 5),
+ p0 = svpmov_lane (z0, 5));
+
+/*
+** pmov_from_vector_7:
+** pmov p0\.d, z0\[7\]
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_7, svuint64_t,
+ p0 = svpmov_lane_u64 (z0, 7),
+ p0 = svpmov_lane (z0, 7));
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u8.c
new file mode 100644
index 0000000..49568cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/pmov_u8.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** pmov_to_vector:
+** pmov z0, p0\.b
+** ret
+*/
+TEST_UNIFORM_Z (pmov_to_vector, svuint8_t,
+ z0 = svpmov_u8_z (p0),
+ z0 = svpmov_u8_z (p0));
+
+/*
+** pmov_from_vector:
+** pmov p0\.b, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector, svuint8_t,
+ p0 = svpmov_u8 (z0),
+ p0 = svpmov (z0));
+
+/*
+** pmov_from_vector_0:
+** pmov p0\.b, z0
+** ret
+*/
+TEST_COMPARE_Z (pmov_from_vector_0, svuint8_t,
+ p0 = svpmov_lane_u8 (z0, 0),
+ p0 = svpmov_lane (z0, 0));
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1dq_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1dq_f64.c
new file mode 100644
index 0000000..9c684c4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1dq_f64.c
@@ -0,0 +1,163 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1dq_f64_base:
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_f64_base, svfloat64_t, float64_t,
+ svst1dq_f64 (p0, x0, z0),
+ svst1dq (p0, x0, z0))
+
+/*
+** st1dq_f64_index:
+** st1d {z0\.q}, p0, \[x0, x1, lsl 3\]
+** ret
+*/
+TEST_STORE (st1dq_f64_index, svfloat64_t, float64_t,
+ svst1dq_f64 (p0, x0 + x1, z0),
+ svst1dq (p0, x0 + x1, z0))
+
+/*
+** st1dq_f64_1:
+** st1d {z0\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_f64_1, svfloat64_t, float64_t,
+ svst1dq_f64 (p0, x0 + svcntd (), z0),
+ svst1dq (p0, x0 + svcntd (), z0))
+
+/*
+** st1dq_f64_7:
+** st1d {z0\.q}, p0, \[x0, #6, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_f64_7, svfloat64_t, float64_t,
+ svst1dq_f64 (p0, x0 + svcntd () * 3, z0),
+ svst1dq (p0, x0 + svcntd () * 3, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1dq_f64_8:
+** incb x0, all, mul #4
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_f64_8, svfloat64_t, float64_t,
+ svst1dq_f64 (p0, x0 + svcntd () * 4, z0),
+ svst1dq (p0, x0 + svcntd () * 4, z0))
+
+/*
+** st1dq_f64_m2:
+** st1d {z0\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_f64_m2, svfloat64_t, float64_t,
+ svst1dq_f64 (p0, x0 - svcntd (), z0),
+ svst1dq (p0, x0 - svcntd (), z0))
+
+/*
+** st1dq_f64_m8:
+** st1d {z0\.q}, p0, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_f64_m8, svfloat64_t, float64_t,
+ svst1dq_f64 (p0, x0 - svcntd () * 4, z0),
+ svst1dq (p0, x0 - svcntd () * 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1dq_f64_m10:
+** decb x0, all, mul #5
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_f64_m10, svfloat64_t, float64_t,
+ svst1dq_f64 (p0, x0 - svcntd () * 5, z0),
+ svst1dq (p0, x0 - svcntd () * 5, z0))
+
+/*
+** st1dq_vnum_f64_0:
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_f64_0, svfloat64_t, float64_t,
+ svst1dq_vnum_f64 (p0, x0, 0, z0),
+ svst1dq_vnum (p0, x0, 0, z0))
+
+/*
+** st1dq_vnum_f64_1:
+** st1d {z0\.q}, p0, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_f64_1, svfloat64_t, float64_t,
+ svst1dq_vnum_f64 (p0, x0, 1, z0),
+ svst1dq_vnum (p0, x0, 1, z0))
+
+/*
+** st1dq_vnum_f64_7:
+** st1d {z0\.q}, p0, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_f64_7, svfloat64_t, float64_t,
+ svst1dq_vnum_f64 (p0, x0, 7, z0),
+ svst1dq_vnum (p0, x0, 7, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1dq_vnum_f64_8:
+** incb x0, all, mul #4
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_f64_8, svfloat64_t, float64_t,
+ svst1dq_vnum_f64 (p0, x0, 8, z0),
+ svst1dq_vnum (p0, x0, 8, z0))
+
+/*
+** st1dq_vnum_f64_m1:
+** st1d {z0\.q}, p0, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_f64_m1, svfloat64_t, float64_t,
+ svst1dq_vnum_f64 (p0, x0, -1, z0),
+ svst1dq_vnum (p0, x0, -1, z0))
+
+/*
+** st1dq_vnum_f64_m8:
+** st1d {z0\.q}, p0, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_f64_m8, svfloat64_t, float64_t,
+ svst1dq_vnum_f64 (p0, x0, -8, z0),
+ svst1dq_vnum (p0, x0, -8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1dq_vnum_f64_m9:
+** dech x0, all, mul #9
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_f64_m9, svfloat64_t, float64_t,
+ svst1dq_vnum_f64 (p0, x0, -9, z0),
+ svst1dq_vnum (p0, x0, -9, z0))
+
+/* Using MUL to calculate an index would also be OK. */
+/*
+** st1dq_vnum_f64_x1:
+** cnth (x[0-9]+)
+** madd (x[0-9]+), (x1, \1|\1, x1), x0
+** st1d {z0\.q}, p0, \[\2\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_f64_x1, svfloat64_t, float64_t,
+ svst1dq_vnum_f64 (p0, x0, x1, z0),
+ svst1dq_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1dq_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1dq_s64.c
new file mode 100644
index 0000000..3634d92
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1dq_s64.c
@@ -0,0 +1,163 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1dq_s64_base:
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_s64_base, svint64_t, int64_t,
+ svst1dq_s64 (p0, x0, z0),
+ svst1dq (p0, x0, z0))
+
+/*
+** st1dq_s64_index:
+** st1d {z0\.q}, p0, \[x0, x1, lsl 3\]
+** ret
+*/
+TEST_STORE (st1dq_s64_index, svint64_t, int64_t,
+ svst1dq_s64 (p0, x0 + x1, z0),
+ svst1dq (p0, x0 + x1, z0))
+
+/*
+** st1dq_s64_1:
+** st1d {z0\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_s64_1, svint64_t, int64_t,
+ svst1dq_s64 (p0, x0 + svcntd (), z0),
+ svst1dq (p0, x0 + svcntd (), z0))
+
+/*
+** st1dq_s64_7:
+** st1d {z0\.q}, p0, \[x0, #6, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_s64_7, svint64_t, int64_t,
+ svst1dq_s64 (p0, x0 + svcntd () * 3, z0),
+ svst1dq (p0, x0 + svcntd () * 3, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1dq_s64_8:
+** incb x0, all, mul #4
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_s64_8, svint64_t, int64_t,
+ svst1dq_s64 (p0, x0 + svcntd () * 4, z0),
+ svst1dq (p0, x0 + svcntd () * 4, z0))
+
+/*
+** st1dq_s64_m2:
+** st1d {z0\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_s64_m2, svint64_t, int64_t,
+ svst1dq_s64 (p0, x0 - svcntd (), z0),
+ svst1dq (p0, x0 - svcntd (), z0))
+
+/*
+** st1dq_s64_m8:
+** st1d {z0\.q}, p0, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_s64_m8, svint64_t, int64_t,
+ svst1dq_s64 (p0, x0 - svcntd () * 4, z0),
+ svst1dq (p0, x0 - svcntd () * 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1dq_s64_m10:
+** decb x0, all, mul #5
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_s64_m10, svint64_t, int64_t,
+ svst1dq_s64 (p0, x0 - svcntd () * 5, z0),
+ svst1dq (p0, x0 - svcntd () * 5, z0))
+
+/*
+** st1dq_vnum_s64_0:
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_s64_0, svint64_t, int64_t,
+ svst1dq_vnum_s64 (p0, x0, 0, z0),
+ svst1dq_vnum (p0, x0, 0, z0))
+
+/*
+** st1dq_vnum_s64_1:
+** st1d {z0\.q}, p0, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_s64_1, svint64_t, int64_t,
+ svst1dq_vnum_s64 (p0, x0, 1, z0),
+ svst1dq_vnum (p0, x0, 1, z0))
+
+/*
+** st1dq_vnum_s64_7:
+** st1d {z0\.q}, p0, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_s64_7, svint64_t, int64_t,
+ svst1dq_vnum_s64 (p0, x0, 7, z0),
+ svst1dq_vnum (p0, x0, 7, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1dq_vnum_s64_8:
+** incb x0, all, mul #4
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_s64_8, svint64_t, int64_t,
+ svst1dq_vnum_s64 (p0, x0, 8, z0),
+ svst1dq_vnum (p0, x0, 8, z0))
+
+/*
+** st1dq_vnum_s64_m1:
+** st1d {z0\.q}, p0, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_s64_m1, svint64_t, int64_t,
+ svst1dq_vnum_s64 (p0, x0, -1, z0),
+ svst1dq_vnum (p0, x0, -1, z0))
+
+/*
+** st1dq_vnum_s64_m8:
+** st1d {z0\.q}, p0, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_s64_m8, svint64_t, int64_t,
+ svst1dq_vnum_s64 (p0, x0, -8, z0),
+ svst1dq_vnum (p0, x0, -8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1dq_vnum_s64_m9:
+** dech x0, all, mul #9
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_s64_m9, svint64_t, int64_t,
+ svst1dq_vnum_s64 (p0, x0, -9, z0),
+ svst1dq_vnum (p0, x0, -9, z0))
+
+/* Using MUL to calculate an index would also be OK. */
+/*
+** st1dq_vnum_s64_x1:
+** cnth (x[0-9]+)
+** madd (x[0-9]+), (x1, \1|\1, x1), x0
+** st1d {z0\.q}, p0, \[\2\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_s64_x1, svint64_t, int64_t,
+ svst1dq_vnum_s64 (p0, x0, x1, z0),
+ svst1dq_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1dq_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1dq_u64.c
new file mode 100644
index 0000000..477b787
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1dq_u64.c
@@ -0,0 +1,163 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1dq_u64_base:
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_u64_base, svuint64_t, uint64_t,
+ svst1dq_u64 (p0, x0, z0),
+ svst1dq (p0, x0, z0))
+
+/*
+** st1dq_u64_index:
+** st1d {z0\.q}, p0, \[x0, x1, lsl 3\]
+** ret
+*/
+TEST_STORE (st1dq_u64_index, svuint64_t, uint64_t,
+ svst1dq_u64 (p0, x0 + x1, z0),
+ svst1dq (p0, x0 + x1, z0))
+
+/*
+** st1dq_u64_1:
+** st1d {z0\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_u64_1, svuint64_t, uint64_t,
+ svst1dq_u64 (p0, x0 + svcntd (), z0),
+ svst1dq (p0, x0 + svcntd (), z0))
+
+/*
+** st1dq_u64_7:
+** st1d {z0\.q}, p0, \[x0, #6, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_u64_7, svuint64_t, uint64_t,
+ svst1dq_u64 (p0, x0 + svcntd () * 3, z0),
+ svst1dq (p0, x0 + svcntd () * 3, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1dq_u64_8:
+** incb x0, all, mul #4
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_u64_8, svuint64_t, uint64_t,
+ svst1dq_u64 (p0, x0 + svcntd () * 4, z0),
+ svst1dq (p0, x0 + svcntd () * 4, z0))
+
+/*
+** st1dq_u64_m2:
+** st1d {z0\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_u64_m2, svuint64_t, uint64_t,
+ svst1dq_u64 (p0, x0 - svcntd (), z0),
+ svst1dq (p0, x0 - svcntd (), z0))
+
+/*
+** st1dq_u64_m8:
+** st1d {z0\.q}, p0, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_u64_m8, svuint64_t, uint64_t,
+ svst1dq_u64 (p0, x0 - svcntd () * 4, z0),
+ svst1dq (p0, x0 - svcntd () * 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1dq_u64_m10:
+** decb x0, all, mul #5
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_u64_m10, svuint64_t, uint64_t,
+ svst1dq_u64 (p0, x0 - svcntd () * 5, z0),
+ svst1dq (p0, x0 - svcntd () * 5, z0))
+
+/*
+** st1dq_vnum_u64_0:
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_u64_0, svuint64_t, uint64_t,
+ svst1dq_vnum_u64 (p0, x0, 0, z0),
+ svst1dq_vnum (p0, x0, 0, z0))
+
+/*
+** st1dq_vnum_u64_1:
+** st1d {z0\.q}, p0, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_u64_1, svuint64_t, uint64_t,
+ svst1dq_vnum_u64 (p0, x0, 1, z0),
+ svst1dq_vnum (p0, x0, 1, z0))
+
+/*
+** st1dq_vnum_u64_7:
+** st1d {z0\.q}, p0, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_u64_7, svuint64_t, uint64_t,
+ svst1dq_vnum_u64 (p0, x0, 7, z0),
+ svst1dq_vnum (p0, x0, 7, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1dq_vnum_u64_8:
+** incb x0, all, mul #4
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_u64_8, svuint64_t, uint64_t,
+ svst1dq_vnum_u64 (p0, x0, 8, z0),
+ svst1dq_vnum (p0, x0, 8, z0))
+
+/*
+** st1dq_vnum_u64_m1:
+** st1d {z0\.q}, p0, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_u64_m1, svuint64_t, uint64_t,
+ svst1dq_vnum_u64 (p0, x0, -1, z0),
+ svst1dq_vnum (p0, x0, -1, z0))
+
+/*
+** st1dq_vnum_u64_m8:
+** st1d {z0\.q}, p0, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_u64_m8, svuint64_t, uint64_t,
+ svst1dq_vnum_u64 (p0, x0, -8, z0),
+ svst1dq_vnum (p0, x0, -8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1dq_vnum_u64_m9:
+** dech x0, all, mul #9
+** st1d {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_u64_m9, svuint64_t, uint64_t,
+ svst1dq_vnum_u64 (p0, x0, -9, z0),
+ svst1dq_vnum (p0, x0, -9, z0))
+
+/* Using MUL to calculate an index would also be OK. */
+/*
+** st1dq_vnum_u64_x1:
+** cnth (x[0-9]+)
+** madd (x[0-9]+), (x1, \1|\1, x1), x0
+** st1d {z0\.q}, p0, \[\2\]
+** ret
+*/
+TEST_STORE (st1dq_vnum_u64_x1, svuint64_t, uint64_t,
+ svst1dq_vnum_u64 (p0, x0, x1, z0),
+ svst1dq_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_bf16.c
new file mode 100644
index 0000000..77e9d6f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_bf16.c
@@ -0,0 +1,93 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1q_scatter_bf16:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_bf16, svbfloat16_t, svuint64_t,
+ svst1q_scatter_u64base_bf16 (p0, z1, z0),
+ svst1q_scatter (p0, z1, z0))
+
+/*
+** st1q_scatter_x0_bf16_offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_bf16_offset, svbfloat16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_bf16 (p0, z1, x0, z0),
+ svst1q_scatter_offset (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_m1_bf16_offset:
+** mov (x[0-9]+), #?-1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_bf16_offset, svbfloat16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_bf16 (p0, z1, -1, z0),
+ svst1q_scatter_offset (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_0_bf16_offset:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_bf16_offset, svbfloat16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_bf16 (p0, z1, 0, z0),
+ svst1q_scatter_offset (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_1_bf16_offset:
+** mov (x[0-9]+), #?1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_bf16_offset, svbfloat16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_bf16 (p0, z1, 1, z0),
+ svst1q_scatter_offset (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_x0_bf16_s64offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_bf16_s64offset, svbfloat16_t, bfloat16_t, svint64_t,
+ svst1q_scatter_s64offset_bf16 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_bf16_s64offset:
+** sxtw z1\.d, p0/m, z1\.d
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_bf16_s64offset, svbfloat16_t, bfloat16_t, svint64_t,
+ svst1q_scatter_s64offset_bf16 (p0, x0, svextw_s64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_bf16_u64offset:
+**	st1q	{z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_bf16_u64offset, svbfloat16_t, bfloat16_t, svuint64_t,
+ svst1q_scatter_u64offset_bf16 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_bf16_u64offset:
+** and z1\.d, z1\.d, #0xffffffff
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_bf16_u64offset, svbfloat16_t, bfloat16_t, svuint64_t,
+ svst1q_scatter_u64offset_bf16 (p0, x0, svextw_u64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f16.c
new file mode 100644
index 0000000..790099a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f16.c
@@ -0,0 +1,93 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1q_scatter_f16:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_f16, svfloat16_t, svuint64_t,
+ svst1q_scatter_u64base_f16 (p0, z1, z0),
+ svst1q_scatter (p0, z1, z0))
+
+/*
+** st1q_scatter_x0_f16_offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_f16_offset, svfloat16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_f16 (p0, z1, x0, z0),
+ svst1q_scatter_offset (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_m1_f16_offset:
+** mov (x[0-9]+), #?-1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_f16_offset, svfloat16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_f16 (p0, z1, -1, z0),
+ svst1q_scatter_offset (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_0_f16_offset:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_f16_offset, svfloat16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_f16 (p0, z1, 0, z0),
+ svst1q_scatter_offset (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_1_f16_offset:
+** mov (x[0-9]+), #?1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_f16_offset, svfloat16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_f16 (p0, z1, 1, z0),
+ svst1q_scatter_offset (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_x0_f16_s64offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_f16_s64offset, svfloat16_t, float16_t, svint64_t,
+ svst1q_scatter_s64offset_f16 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_f16_s64offset:
+** sxtw z1\.d, p0/m, z1\.d
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_f16_s64offset, svfloat16_t, float16_t, svint64_t,
+ svst1q_scatter_s64offset_f16 (p0, x0, svextw_s64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_f16_u64offset:
+**	st1q	{z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_f16_u64offset, svfloat16_t, float16_t, svuint64_t,
+ svst1q_scatter_u64offset_f16 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_f16_u64offset:
+** and z1\.d, z1\.d, #0xffffffff
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_f16_u64offset, svfloat16_t, float16_t, svuint64_t,
+ svst1q_scatter_u64offset_f16 (p0, x0, svextw_u64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f32.c
new file mode 100644
index 0000000..14a0890
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f32.c
@@ -0,0 +1,93 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1q_scatter_f32:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_f32, svfloat32_t, svuint64_t,
+ svst1q_scatter_u64base_f32 (p0, z1, z0),
+ svst1q_scatter (p0, z1, z0))
+
+/*
+** st1q_scatter_x0_f32_offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_f32_offset, svfloat32_t, svuint64_t,
+ svst1q_scatter_u64base_offset_f32 (p0, z1, x0, z0),
+ svst1q_scatter_offset (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_m1_f32_offset:
+** mov (x[0-9]+), #?-1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_f32_offset, svfloat32_t, svuint64_t,
+ svst1q_scatter_u64base_offset_f32 (p0, z1, -1, z0),
+ svst1q_scatter_offset (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_0_f32_offset:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_f32_offset, svfloat32_t, svuint64_t,
+ svst1q_scatter_u64base_offset_f32 (p0, z1, 0, z0),
+ svst1q_scatter_offset (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_1_f32_offset:
+** mov (x[0-9]+), #?1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_f32_offset, svfloat32_t, svuint64_t,
+ svst1q_scatter_u64base_offset_f32 (p0, z1, 1, z0),
+ svst1q_scatter_offset (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_x0_f32_s64offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_f32_s64offset, svfloat32_t, float32_t, svint64_t,
+ svst1q_scatter_s64offset_f32 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_f32_s64offset:
+** sxtw z1\.d, p0/m, z1\.d
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_f32_s64offset, svfloat32_t, float32_t, svint64_t,
+ svst1q_scatter_s64offset_f32 (p0, x0, svextw_s64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_f32_u64offset:
+**	st1q	{z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_f32_u64offset, svfloat32_t, float32_t, svuint64_t,
+ svst1q_scatter_u64offset_f32 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_f32_u64offset:
+** and z1\.d, z1\.d, #0xffffffff
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_f32_u64offset, svfloat32_t, float32_t, svuint64_t,
+ svst1q_scatter_u64offset_f32 (p0, x0, svextw_u64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f64.c
new file mode 100644
index 0000000..33d0f39
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_f64.c
@@ -0,0 +1,152 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1q_scatter_f64:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_f64, svfloat64_t, svuint64_t,
+ svst1q_scatter_u64base_f64 (p0, z1, z0),
+ svst1q_scatter (p0, z1, z0))
+
+/*
+** st1q_scatter_x0_f64_offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_f64_offset, svfloat64_t, svuint64_t,
+ svst1q_scatter_u64base_offset_f64 (p0, z1, x0, z0),
+ svst1q_scatter_offset (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_x0_f64_index:
+** lsl (x[0-9]+), x0, #?3
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_f64_index, svfloat64_t, svuint64_t,
+ svst1q_scatter_u64base_index_f64 (p0, z1, x0, z0),
+ svst1q_scatter_index (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_m1_f64_offset:
+** mov (x[0-9]+), #?-1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_f64_offset, svfloat64_t, svuint64_t,
+ svst1q_scatter_u64base_offset_f64 (p0, z1, -1, z0),
+ svst1q_scatter_offset (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_m1_f64_index:
+** mov (x[0-9]+), #?-8
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_f64_index, svfloat64_t, svuint64_t,
+ svst1q_scatter_u64base_index_f64 (p0, z1, -1, z0),
+ svst1q_scatter_index (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_0_f64_offset:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_f64_offset, svfloat64_t, svuint64_t,
+ svst1q_scatter_u64base_offset_f64 (p0, z1, 0, z0),
+ svst1q_scatter_offset (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_0_f64_index:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_f64_index, svfloat64_t, svuint64_t,
+ svst1q_scatter_u64base_index_f64 (p0, z1, 0, z0),
+ svst1q_scatter_index (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_1_f64_offset:
+** mov (x[0-9]+), #?1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_f64_offset, svfloat64_t, svuint64_t,
+ svst1q_scatter_u64base_offset_f64 (p0, z1, 1, z0),
+ svst1q_scatter_offset (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_1_f64_index:
+** mov (x[0-9]+), #?8
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_f64_index, svfloat64_t, svuint64_t,
+ svst1q_scatter_u64base_index_f64 (p0, z1, 1, z0),
+ svst1q_scatter_index (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_x0_f64_s64offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_f64_s64offset, svfloat64_t, float64_t, svint64_t,
+ svst1q_scatter_s64offset_f64 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_f64_s64offset:
+** sxtw z1\.d, p0/m, z1\.d
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_f64_s64offset, svfloat64_t, float64_t, svint64_t,
+ svst1q_scatter_s64offset_f64 (p0, x0, svextw_s64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_f64_s64index:
+** lsl (z[0-9]+\.d), z1\.d, #3
+** st1q {z0\.q}, p0, \[\1, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_f64_s64index, svfloat64_t, float64_t, svint64_t,
+ svst1q_scatter_s64index_f64 (p0, x0, z1, z0),
+ svst1q_scatter_index (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_x0_f64_u64offset:
+**	st1q	{z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_f64_u64offset, svfloat64_t, float64_t, svuint64_t,
+ svst1q_scatter_u64offset_f64 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_f64_u64offset:
+** and z1\.d, z1\.d, #0xffffffff
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_f64_u64offset, svfloat64_t, float64_t, svuint64_t,
+ svst1q_scatter_u64offset_f64 (p0, x0, svextw_u64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_f64_u64index:
+** lsl (z[0-9]+\.d), z1\.d, #3
+** st1q {z0\.q}, p0, \[\1, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_f64_u64index, svfloat64_t, float64_t, svuint64_t,
+ svst1q_scatter_u64index_f64 (p0, x0, z1, z0),
+ svst1q_scatter_index (p0, x0, z1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s16.c
new file mode 100644
index 0000000..0764899
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s16.c
@@ -0,0 +1,93 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1q_scatter_s16:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_s16, svint16_t, svuint64_t,
+ svst1q_scatter_u64base_s16 (p0, z1, z0),
+ svst1q_scatter (p0, z1, z0))
+
+/*
+** st1q_scatter_x0_s16_offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_s16_offset, svint16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s16 (p0, z1, x0, z0),
+ svst1q_scatter_offset (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_m1_s16_offset:
+** mov (x[0-9]+), #?-1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_s16_offset, svint16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s16 (p0, z1, -1, z0),
+ svst1q_scatter_offset (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_0_s16_offset:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_s16_offset, svint16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s16 (p0, z1, 0, z0),
+ svst1q_scatter_offset (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_1_s16_offset:
+** mov (x[0-9]+), #?1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_s16_offset, svint16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s16 (p0, z1, 1, z0),
+ svst1q_scatter_offset (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_x0_s16_s64offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_s16_s64offset, svint16_t, int16_t, svint64_t,
+ svst1q_scatter_s64offset_s16 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_s16_s64offset:
+** sxtw z1\.d, p0/m, z1\.d
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_s16_s64offset, svint16_t, int16_t, svint64_t,
+ svst1q_scatter_s64offset_s16 (p0, x0, svextw_s64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_s16_u64offset:
+**	st1q	{z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_s16_u64offset, svint16_t, int16_t, svuint64_t,
+ svst1q_scatter_u64offset_s16 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_s16_u64offset:
+** and z1\.d, z1\.d, #0xffffffff
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_s16_u64offset, svint16_t, int16_t, svuint64_t,
+ svst1q_scatter_u64offset_s16 (p0, x0, svextw_u64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s32.c
new file mode 100644
index 0000000..24e275f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s32.c
@@ -0,0 +1,93 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1q_scatter_s32:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_s32, svint32_t, svuint64_t,
+ svst1q_scatter_u64base_s32 (p0, z1, z0),
+ svst1q_scatter (p0, z1, z0))
+
+/*
+** st1q_scatter_x0_s32_offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_s32_offset, svint32_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s32 (p0, z1, x0, z0),
+ svst1q_scatter_offset (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_m1_s32_offset:
+** mov (x[0-9]+), #?-1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_s32_offset, svint32_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s32 (p0, z1, -1, z0),
+ svst1q_scatter_offset (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_0_s32_offset:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_s32_offset, svint32_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s32 (p0, z1, 0, z0),
+ svst1q_scatter_offset (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_1_s32_offset:
+** mov (x[0-9]+), #?1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_s32_offset, svint32_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s32 (p0, z1, 1, z0),
+ svst1q_scatter_offset (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_x0_s32_s64offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_s32_s64offset, svint32_t, int32_t, svint64_t,
+ svst1q_scatter_s64offset_s32 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_s32_s64offset:
+** sxtw z1\.d, p0/m, z1\.d
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_s32_s64offset, svint32_t, int32_t, svint64_t,
+ svst1q_scatter_s64offset_s32 (p0, x0, svextw_s64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_s32_u64offset:
+**	st1q	{z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_s32_u64offset, svint32_t, int32_t, svuint64_t,
+ svst1q_scatter_u64offset_s32 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_s32_u64offset:
+** and z1\.d, z1\.d, #0xffffffff
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_s32_u64offset, svint32_t, int32_t, svuint64_t,
+ svst1q_scatter_u64offset_s32 (p0, x0, svextw_u64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s64.c
new file mode 100644
index 0000000..cb8da1e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s64.c
@@ -0,0 +1,152 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1q_scatter_s64:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_s64, svint64_t, svuint64_t,
+ svst1q_scatter_u64base_s64 (p0, z1, z0),
+ svst1q_scatter (p0, z1, z0))
+
+/*
+** st1q_scatter_x0_s64_offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_s64_offset, svint64_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s64 (p0, z1, x0, z0),
+ svst1q_scatter_offset (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_x0_s64_index:
+** lsl (x[0-9]+), x0, #?3
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_s64_index, svint64_t, svuint64_t,
+ svst1q_scatter_u64base_index_s64 (p0, z1, x0, z0),
+ svst1q_scatter_index (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_m1_s64_offset:
+** mov (x[0-9]+), #?-1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_s64_offset, svint64_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s64 (p0, z1, -1, z0),
+ svst1q_scatter_offset (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_m1_s64_index:
+** mov (x[0-9]+), #?-8
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_s64_index, svint64_t, svuint64_t,
+ svst1q_scatter_u64base_index_s64 (p0, z1, -1, z0),
+ svst1q_scatter_index (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_0_s64_offset:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_s64_offset, svint64_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s64 (p0, z1, 0, z0),
+ svst1q_scatter_offset (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_0_s64_index:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_s64_index, svint64_t, svuint64_t,
+ svst1q_scatter_u64base_index_s64 (p0, z1, 0, z0),
+ svst1q_scatter_index (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_1_s64_offset:
+** mov (x[0-9]+), #?1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_s64_offset, svint64_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s64 (p0, z1, 1, z0),
+ svst1q_scatter_offset (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_1_s64_index:
+** mov (x[0-9]+), #?8
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_s64_index, svint64_t, svuint64_t,
+ svst1q_scatter_u64base_index_s64 (p0, z1, 1, z0),
+ svst1q_scatter_index (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_x0_s64_s64offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_s64_s64offset, svint64_t, int64_t, svint64_t,
+ svst1q_scatter_s64offset_s64 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_s64_s64offset:
+** sxtw z1\.d, p0/m, z1\.d
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_s64_s64offset, svint64_t, int64_t, svint64_t,
+ svst1q_scatter_s64offset_s64 (p0, x0, svextw_s64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_s64_s64index:
+** lsl (z[0-9]+\.d), z1\.d, #3
+** st1q {z0\.q}, p0, \[\1, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_s64_s64index, svint64_t, int64_t, svint64_t,
+ svst1q_scatter_s64index_s64 (p0, x0, z1, z0),
+ svst1q_scatter_index (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_x0_s64_u64offset:
+**	st1q	{z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_s64_u64offset, svint64_t, int64_t, svuint64_t,
+ svst1q_scatter_u64offset_s64 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_s64_u64offset:
+** and z1\.d, z1\.d, #0xffffffff
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_s64_u64offset, svint64_t, int64_t, svuint64_t,
+ svst1q_scatter_u64offset_s64 (p0, x0, svextw_u64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_s64_u64index:
+** lsl (z[0-9]+\.d), z1\.d, #3
+** st1q {z0\.q}, p0, \[\1, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_s64_u64index, svint64_t, int64_t, svuint64_t,
+ svst1q_scatter_u64index_s64 (p0, x0, z1, z0),
+ svst1q_scatter_index (p0, x0, z1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s8.c
new file mode 100644
index 0000000..6a79bf0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_s8.c
@@ -0,0 +1,93 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1q_scatter_s8:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_s8, svint8_t, svuint64_t,
+ svst1q_scatter_u64base_s8 (p0, z1, z0),
+ svst1q_scatter (p0, z1, z0))
+
+/*
+** st1q_scatter_x0_s8_offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_s8_offset, svint8_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s8 (p0, z1, x0, z0),
+ svst1q_scatter_offset (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_m1_s8_offset:
+** mov (x[0-9]+), #?-1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_s8_offset, svint8_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s8 (p0, z1, -1, z0),
+ svst1q_scatter_offset (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_0_s8_offset:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_s8_offset, svint8_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s8 (p0, z1, 0, z0),
+ svst1q_scatter_offset (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_1_s8_offset:
+** mov (x[0-9]+), #?1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_s8_offset, svint8_t, svuint64_t,
+ svst1q_scatter_u64base_offset_s8 (p0, z1, 1, z0),
+ svst1q_scatter_offset (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_x0_s8_s64offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_s8_s64offset, svint8_t, int8_t, svint64_t,
+ svst1q_scatter_s64offset_s8 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_s8_s64offset:
+** sxtw z1\.d, p0/m, z1\.d
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_s8_s64offset, svint8_t, int8_t, svint64_t,
+ svst1q_scatter_s64offset_s8 (p0, x0, svextw_s64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_s8_u64offset:
+**	st1q	{z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_s8_u64offset, svint8_t, int8_t, svuint64_t,
+ svst1q_scatter_u64offset_s8 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_s8_u64offset:
+** and z1\.d, z1\.d, #0xffffffff
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_s8_u64offset, svint8_t, int8_t, svuint64_t,
+ svst1q_scatter_u64offset_s8 (p0, x0, svextw_u64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u16.c
new file mode 100644
index 0000000..046a2d1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u16.c
@@ -0,0 +1,93 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1q_scatter_u16:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_u16, svuint16_t, svuint64_t,
+ svst1q_scatter_u64base_u16 (p0, z1, z0),
+ svst1q_scatter (p0, z1, z0))
+
+/*
+** st1q_scatter_x0_u16_offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_u16_offset, svuint16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u16 (p0, z1, x0, z0),
+ svst1q_scatter_offset (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_m1_u16_offset:
+** mov (x[0-9]+), #?-1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_u16_offset, svuint16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u16 (p0, z1, -1, z0),
+ svst1q_scatter_offset (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_0_u16_offset:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_u16_offset, svuint16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u16 (p0, z1, 0, z0),
+ svst1q_scatter_offset (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_1_u16_offset:
+** mov (x[0-9]+), #?1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_u16_offset, svuint16_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u16 (p0, z1, 1, z0),
+ svst1q_scatter_offset (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_x0_u16_s64offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_u16_s64offset, svuint16_t, uint16_t, svint64_t,
+ svst1q_scatter_s64offset_u16 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_u16_s64offset:
+** sxtw z1\.d, p0/m, z1\.d
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_u16_s64offset, svuint16_t, uint16_t, svint64_t,
+ svst1q_scatter_s64offset_u16 (p0, x0, svextw_s64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_u16_u64offset:
+**	st1q	{z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_u16_u64offset, svuint16_t, uint16_t, svuint64_t,
+ svst1q_scatter_u64offset_u16 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_u16_u64offset:
+** and z1\.d, z1\.d, #0xffffffff
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_u16_u64offset, svuint16_t, uint16_t, svuint64_t,
+ svst1q_scatter_u64offset_u16 (p0, x0, svextw_u64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u32.c
new file mode 100644
index 0000000..54f5a8f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u32.c
@@ -0,0 +1,93 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1q_scatter_u32:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_u32, svuint32_t, svuint64_t,
+ svst1q_scatter_u64base_u32 (p0, z1, z0),
+ svst1q_scatter (p0, z1, z0))
+
+/*
+** st1q_scatter_x0_u32_offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_u32_offset, svuint32_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u32 (p0, z1, x0, z0),
+ svst1q_scatter_offset (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_m1_u32_offset:
+** mov (x[0-9]+), #?-1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_u32_offset, svuint32_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u32 (p0, z1, -1, z0),
+ svst1q_scatter_offset (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_0_u32_offset:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_u32_offset, svuint32_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u32 (p0, z1, 0, z0),
+ svst1q_scatter_offset (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_1_u32_offset:
+** mov (x[0-9]+), #?1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_u32_offset, svuint32_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u32 (p0, z1, 1, z0),
+ svst1q_scatter_offset (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_x0_u32_s64offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_u32_s64offset, svuint32_t, uint32_t, svint64_t,
+ svst1q_scatter_s64offset_u32 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_u32_s64offset:
+** sxtw z1\.d, p0/m, z1\.d
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_u32_s64offset, svuint32_t, uint32_t, svint64_t,
+ svst1q_scatter_s64offset_u32 (p0, x0, svextw_s64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_u32_u64offset:
+**	st1q	{z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_u32_u64offset, svuint32_t, uint32_t, svuint64_t,
+ svst1q_scatter_u64offset_u32 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_u32_u64offset:
+** and z1\.d, z1\.d, #0xffffffff
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_u32_u64offset, svuint32_t, uint32_t, svuint64_t,
+ svst1q_scatter_u64offset_u32 (p0, x0, svextw_u64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u64.c
new file mode 100644
index 0000000..553cf10
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u64.c
@@ -0,0 +1,152 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1q_scatter_u64:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_u64, svuint64_t, svuint64_t,
+ svst1q_scatter_u64base_u64 (p0, z1, z0),
+ svst1q_scatter (p0, z1, z0))
+
+/*
+** st1q_scatter_x0_u64_offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_u64_offset, svuint64_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u64 (p0, z1, x0, z0),
+ svst1q_scatter_offset (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_x0_u64_index:
+** lsl (x[0-9]+), x0, #?3
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_u64_index, svuint64_t, svuint64_t,
+ svst1q_scatter_u64base_index_u64 (p0, z1, x0, z0),
+ svst1q_scatter_index (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_m1_u64_offset:
+** mov (x[0-9]+), #?-1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_u64_offset, svuint64_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u64 (p0, z1, -1, z0),
+ svst1q_scatter_offset (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_m1_u64_index:
+** mov (x[0-9]+), #?-8
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_u64_index, svuint64_t, svuint64_t,
+ svst1q_scatter_u64base_index_u64 (p0, z1, -1, z0),
+ svst1q_scatter_index (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_0_u64_offset:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_u64_offset, svuint64_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u64 (p0, z1, 0, z0),
+ svst1q_scatter_offset (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_0_u64_index:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_u64_index, svuint64_t, svuint64_t,
+ svst1q_scatter_u64base_index_u64 (p0, z1, 0, z0),
+ svst1q_scatter_index (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_1_u64_offset:
+** mov (x[0-9]+), #?1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_u64_offset, svuint64_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u64 (p0, z1, 1, z0),
+ svst1q_scatter_offset (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_1_u64_index:
+** mov (x[0-9]+), #?8
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_u64_index, svuint64_t, svuint64_t,
+ svst1q_scatter_u64base_index_u64 (p0, z1, 1, z0),
+ svst1q_scatter_index (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_x0_u64_s64offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_u64_s64offset, svuint64_t, uint64_t, svint64_t,
+ svst1q_scatter_s64offset_u64 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_u64_s64offset:
+** sxtw z1\.d, p0/m, z1\.d
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_u64_s64offset, svuint64_t, uint64_t, svint64_t,
+ svst1q_scatter_s64offset_u64 (p0, x0, svextw_s64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_u64_s64index:
+** lsl (z[0-9]+\.d), z1\.d, #3
+** st1q {z0\.q}, p0, \[\1, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_u64_s64index, svuint64_t, uint64_t, svint64_t,
+ svst1q_scatter_s64index_u64 (p0, x0, z1, z0),
+ svst1q_scatter_index (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_x0_u64_u64offset:
+**	st1q	{z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_u64_u64offset, svuint64_t, uint64_t, svuint64_t,
+ svst1q_scatter_u64offset_u64 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_u64_u64offset:
+** and z1\.d, z1\.d, #0xffffffff
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_u64_u64offset, svuint64_t, uint64_t, svuint64_t,
+ svst1q_scatter_u64offset_u64 (p0, x0, svextw_u64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_u64_u64index:
+** lsl (z[0-9]+\.d), z1\.d, #3
+** st1q {z0\.q}, p0, \[\1, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_u64_u64index, svuint64_t, uint64_t, svuint64_t,
+ svst1q_scatter_u64index_u64 (p0, x0, z1, z0),
+ svst1q_scatter_index (p0, x0, z1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u8.c
new file mode 100644
index 0000000..52eb0b1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1q_scatter_u8.c
@@ -0,0 +1,93 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" { target { ! ilp32 } } } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1q_scatter_u8:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_u8, svuint8_t, svuint64_t,
+ svst1q_scatter_u64base_u8 (p0, z1, z0),
+ svst1q_scatter (p0, z1, z0))
+
+/*
+** st1q_scatter_x0_u8_offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_x0_u8_offset, svuint8_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u8 (p0, z1, x0, z0),
+ svst1q_scatter_offset (p0, z1, x0, z0))
+
+/*
+** st1q_scatter_m1_u8_offset:
+** mov (x[0-9]+), #?-1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_m1_u8_offset, svuint8_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u8 (p0, z1, -1, z0),
+ svst1q_scatter_offset (p0, z1, -1, z0))
+
+/*
+** st1q_scatter_0_u8_offset:
+** st1q {z0\.q}, p0, \[z1\.d\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_0_u8_offset, svuint8_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u8 (p0, z1, 0, z0),
+ svst1q_scatter_offset (p0, z1, 0, z0))
+
+/*
+** st1q_scatter_1_u8_offset:
+** mov (x[0-9]+), #?1
+** st1q {z0\.q}, p0, \[z1\.d, \1\]
+** ret
+*/
+TEST_STORE_SCATTER_ZS (st1q_scatter_1_u8_offset, svuint8_t, svuint64_t,
+ svst1q_scatter_u64base_offset_u8 (p0, z1, 1, z0),
+ svst1q_scatter_offset (p0, z1, 1, z0))
+
+/*
+** st1q_scatter_x0_u8_s64offset:
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_u8_s64offset, svuint8_t, uint8_t, svint64_t,
+ svst1q_scatter_s64offset_u8 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_u8_s64offset:
+** sxtw z1\.d, p0/m, z1\.d
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_u8_s64offset, svuint8_t, uint8_t, svint64_t,
+ svst1q_scatter_s64offset_u8 (p0, x0, svextw_s64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
+
+/*
+** st1q_scatter_x0_u8_u64offset:
+**	st1q	{z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_x0_u8_u64offset, svuint8_t, uint8_t, svuint64_t,
+ svst1q_scatter_u64offset_u8 (p0, x0, z1, z0),
+ svst1q_scatter_offset (p0, x0, z1, z0))
+
+/*
+** st1q_scatter_ext_u8_u64offset:
+** and z1\.d, z1\.d, #0xffffffff
+** st1q {z0\.q}, p0, \[z1\.d, x0\]
+** ret
+*/
+TEST_STORE_SCATTER_SZ (st1q_scatter_ext_u8_u64offset, svuint8_t, uint8_t, svuint64_t,
+ svst1q_scatter_u64offset_u8 (p0, x0, svextw_u64_x (p0, z1), z0),
+ svst1q_scatter_offset (p0, x0, svextw_x (p0, z1), z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1wq_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1wq_f32.c
new file mode 100644
index 0000000..610f382
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1wq_f32.c
@@ -0,0 +1,163 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1wq_f32_base:
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_f32_base, svfloat32_t, float32_t,
+ svst1wq_f32 (p0, x0, z0),
+ svst1wq (p0, x0, z0))
+
+/*
+** st1wq_f32_index:
+** st1w {z0\.q}, p0, \[x0, x1, lsl 2\]
+** ret
+*/
+TEST_STORE (st1wq_f32_index, svfloat32_t, float32_t,
+ svst1wq_f32 (p0, x0 + x1, z0),
+ svst1wq (p0, x0 + x1, z0))
+
+/*
+** st1wq_f32_1:
+** st1w {z0\.q}, p0, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_f32_1, svfloat32_t, float32_t,
+ svst1wq_f32 (p0, x0 + svcntw () / 4, z0),
+ svst1wq (p0, x0 + svcntw () / 4, z0))
+
+/*
+** st1wq_f32_7:
+** st1w {z0\.q}, p0, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_f32_7, svfloat32_t, float32_t,
+ svst1wq_f32 (p0, x0 + svcntw () * 7 / 4, z0),
+ svst1wq (p0, x0 + svcntw () * 7 / 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1wq_f32_8:
+** incb x0, all, mul #2
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_f32_8, svfloat32_t, float32_t,
+ svst1wq_f32 (p0, x0 + svcntw () * 2, z0),
+ svst1wq (p0, x0 + svcntw () * 2, z0))
+
+/*
+** st1wq_f32_m1:
+** st1w {z0\.q}, p0, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_f32_m1, svfloat32_t, float32_t,
+ svst1wq_f32 (p0, x0 - svcntw () / 4, z0),
+ svst1wq (p0, x0 - svcntw () / 4, z0))
+
+/*
+** st1wq_f32_m8:
+** st1w {z0\.q}, p0, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_f32_m8, svfloat32_t, float32_t,
+ svst1wq_f32 (p0, x0 - svcntw () * 2, z0),
+ svst1wq (p0, x0 - svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1wq_f32_m9:
+** decw x0, all, mul #9
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_f32_m9, svfloat32_t, float32_t,
+ svst1wq_f32 (p0, x0 - svcntw () * 9 / 4, z0),
+ svst1wq (p0, x0 - svcntw () * 9 / 4, z0))
+
+/*
+** st1wq_vnum_f32_0:
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_f32_0, svfloat32_t, float32_t,
+ svst1wq_vnum_f32 (p0, x0, 0, z0),
+ svst1wq_vnum (p0, x0, 0, z0))
+
+/*
+** st1wq_vnum_f32_1:
+** st1w {z0\.q}, p0, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_f32_1, svfloat32_t, float32_t,
+ svst1wq_vnum_f32 (p0, x0, 1, z0),
+ svst1wq_vnum (p0, x0, 1, z0))
+
+/*
+** st1wq_vnum_f32_7:
+** st1w {z0\.q}, p0, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_f32_7, svfloat32_t, float32_t,
+ svst1wq_vnum_f32 (p0, x0, 7, z0),
+ svst1wq_vnum (p0, x0, 7, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1wq_vnum_f32_8:
+** incb x0, all, mul #2
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_f32_8, svfloat32_t, float32_t,
+ svst1wq_vnum_f32 (p0, x0, 8, z0),
+ svst1wq_vnum (p0, x0, 8, z0))
+
+/*
+** st1wq_vnum_f32_m1:
+** st1w {z0\.q}, p0, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_f32_m1, svfloat32_t, float32_t,
+ svst1wq_vnum_f32 (p0, x0, -1, z0),
+ svst1wq_vnum (p0, x0, -1, z0))
+
+/*
+** st1wq_vnum_f32_m8:
+** st1w {z0\.q}, p0, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_f32_m8, svfloat32_t, float32_t,
+ svst1wq_vnum_f32 (p0, x0, -8, z0),
+ svst1wq_vnum (p0, x0, -8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1wq_vnum_f32_m9:
+** decw x0, all, mul #9
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_f32_m9, svfloat32_t, float32_t,
+ svst1wq_vnum_f32 (p0, x0, -9, z0),
+ svst1wq_vnum (p0, x0, -9, z0))
+
+/* Using MUL to calculate an index would also be OK. */
+/*
+** st1wq_vnum_f32_x1:
+** cntw (x[0-9]+)
+** madd (x[0-9]+), (x1, \1|\1, x1), x0
+** st1w {z0\.q}, p0, \[\2\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_f32_x1, svfloat32_t, float32_t,
+ svst1wq_vnum_f32 (p0, x0, x1, z0),
+ svst1wq_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1wq_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1wq_s32.c
new file mode 100644
index 0000000..a79ec32
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1wq_s32.c
@@ -0,0 +1,163 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1wq_s32_base:
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_s32_base, svint32_t, int32_t,
+ svst1wq_s32 (p0, x0, z0),
+ svst1wq (p0, x0, z0))
+
+/*
+** st1wq_s32_index:
+** st1w {z0\.q}, p0, \[x0, x1, lsl 2\]
+** ret
+*/
+TEST_STORE (st1wq_s32_index, svint32_t, int32_t,
+ svst1wq_s32 (p0, x0 + x1, z0),
+ svst1wq (p0, x0 + x1, z0))
+
+/*
+** st1wq_s32_1:
+** st1w {z0\.q}, p0, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_s32_1, svint32_t, int32_t,
+ svst1wq_s32 (p0, x0 + svcntw () / 4, z0),
+ svst1wq (p0, x0 + svcntw () / 4, z0))
+
+/*
+** st1wq_s32_7:
+** st1w {z0\.q}, p0, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_s32_7, svint32_t, int32_t,
+ svst1wq_s32 (p0, x0 + svcntw () * 7 / 4, z0),
+ svst1wq (p0, x0 + svcntw () * 7 / 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1wq_s32_8:
+** incb x0, all, mul #2
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_s32_8, svint32_t, int32_t,
+ svst1wq_s32 (p0, x0 + svcntw () * 2, z0),
+ svst1wq (p0, x0 + svcntw () * 2, z0))
+
+/*
+** st1wq_s32_m1:
+** st1w {z0\.q}, p0, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_s32_m1, svint32_t, int32_t,
+ svst1wq_s32 (p0, x0 - svcntw () / 4, z0),
+ svst1wq (p0, x0 - svcntw () / 4, z0))
+
+/*
+** st1wq_s32_m8:
+** st1w {z0\.q}, p0, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_s32_m8, svint32_t, int32_t,
+ svst1wq_s32 (p0, x0 - svcntw () * 2, z0),
+ svst1wq (p0, x0 - svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1wq_s32_m9:
+** decw x0, all, mul #9
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_s32_m9, svint32_t, int32_t,
+ svst1wq_s32 (p0, x0 - svcntw () * 9 / 4, z0),
+ svst1wq (p0, x0 - svcntw () * 9 / 4, z0))
+
+/*
+** st1wq_vnum_s32_0:
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_s32_0, svint32_t, int32_t,
+ svst1wq_vnum_s32 (p0, x0, 0, z0),
+ svst1wq_vnum (p0, x0, 0, z0))
+
+/*
+** st1wq_vnum_s32_1:
+** st1w {z0\.q}, p0, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_s32_1, svint32_t, int32_t,
+ svst1wq_vnum_s32 (p0, x0, 1, z0),
+ svst1wq_vnum (p0, x0, 1, z0))
+
+/*
+** st1wq_vnum_s32_7:
+** st1w {z0\.q}, p0, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_s32_7, svint32_t, int32_t,
+ svst1wq_vnum_s32 (p0, x0, 7, z0),
+ svst1wq_vnum (p0, x0, 7, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1wq_vnum_s32_8:
+** incb x0, all, mul #2
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_s32_8, svint32_t, int32_t,
+ svst1wq_vnum_s32 (p0, x0, 8, z0),
+ svst1wq_vnum (p0, x0, 8, z0))
+
+/*
+** st1wq_vnum_s32_m1:
+** st1w {z0\.q}, p0, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_s32_m1, svint32_t, int32_t,
+ svst1wq_vnum_s32 (p0, x0, -1, z0),
+ svst1wq_vnum (p0, x0, -1, z0))
+
+/*
+** st1wq_vnum_s32_m8:
+** st1w {z0\.q}, p0, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_s32_m8, svint32_t, int32_t,
+ svst1wq_vnum_s32 (p0, x0, -8, z0),
+ svst1wq_vnum (p0, x0, -8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1wq_vnum_s32_m9:
+** decw x0, all, mul #9
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_s32_m9, svint32_t, int32_t,
+ svst1wq_vnum_s32 (p0, x0, -9, z0),
+ svst1wq_vnum (p0, x0, -9, z0))
+
+/* Using MUL to calculate an index would also be OK. */
+/*
+** st1wq_vnum_s32_x1:
+** cntw (x[0-9]+)
+** madd (x[0-9]+), (x1, \1|\1, x1), x0
+** st1w {z0\.q}, p0, \[\2\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_s32_x1, svint32_t, int32_t,
+ svst1wq_vnum_s32 (p0, x0, x1, z0),
+ svst1wq_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1wq_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1wq_u32.c
new file mode 100644
index 0000000..410617c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st1wq_u32.c
@@ -0,0 +1,163 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st1wq_u32_base:
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_u32_base, svuint32_t, uint32_t,
+ svst1wq_u32 (p0, x0, z0),
+ svst1wq (p0, x0, z0))
+
+/*
+** st1wq_u32_index:
+** st1w {z0\.q}, p0, \[x0, x1, lsl 2\]
+** ret
+*/
+TEST_STORE (st1wq_u32_index, svuint32_t, uint32_t,
+ svst1wq_u32 (p0, x0 + x1, z0),
+ svst1wq (p0, x0 + x1, z0))
+
+/*
+** st1wq_u32_1:
+** st1w {z0\.q}, p0, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_u32_1, svuint32_t, uint32_t,
+ svst1wq_u32 (p0, x0 + svcntw () / 4, z0),
+ svst1wq (p0, x0 + svcntw () / 4, z0))
+
+/*
+** st1wq_u32_7:
+** st1w {z0\.q}, p0, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_u32_7, svuint32_t, uint32_t,
+ svst1wq_u32 (p0, x0 + svcntw () * 7 / 4, z0),
+ svst1wq (p0, x0 + svcntw () * 7 / 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1wq_u32_8:
+** incb x0, all, mul #2
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_u32_8, svuint32_t, uint32_t,
+ svst1wq_u32 (p0, x0 + svcntw () * 2, z0),
+ svst1wq (p0, x0 + svcntw () * 2, z0))
+
+/*
+** st1wq_u32_m1:
+** st1w {z0\.q}, p0, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_u32_m1, svuint32_t, uint32_t,
+ svst1wq_u32 (p0, x0 - svcntw () / 4, z0),
+ svst1wq (p0, x0 - svcntw () / 4, z0))
+
+/*
+** st1wq_u32_m8:
+** st1w {z0\.q}, p0, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_u32_m8, svuint32_t, uint32_t,
+ svst1wq_u32 (p0, x0 - svcntw () * 2, z0),
+ svst1wq (p0, x0 - svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1wq_u32_m9:
+** decw x0, all, mul #9
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_u32_m9, svuint32_t, uint32_t,
+ svst1wq_u32 (p0, x0 - svcntw () * 9 / 4, z0),
+ svst1wq (p0, x0 - svcntw () * 9 / 4, z0))
+
+/*
+** st1wq_vnum_u32_0:
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_u32_0, svuint32_t, uint32_t,
+ svst1wq_vnum_u32 (p0, x0, 0, z0),
+ svst1wq_vnum (p0, x0, 0, z0))
+
+/*
+** st1wq_vnum_u32_1:
+** st1w {z0\.q}, p0, \[x0, #1, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_u32_1, svuint32_t, uint32_t,
+ svst1wq_vnum_u32 (p0, x0, 1, z0),
+ svst1wq_vnum (p0, x0, 1, z0))
+
+/*
+** st1wq_vnum_u32_7:
+** st1w {z0\.q}, p0, \[x0, #7, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_u32_7, svuint32_t, uint32_t,
+ svst1wq_vnum_u32 (p0, x0, 7, z0),
+ svst1wq_vnum (p0, x0, 7, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1wq_vnum_u32_8:
+** incb x0, all, mul #2
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_u32_8, svuint32_t, uint32_t,
+ svst1wq_vnum_u32 (p0, x0, 8, z0),
+ svst1wq_vnum (p0, x0, 8, z0))
+
+/*
+** st1wq_vnum_u32_m1:
+** st1w {z0\.q}, p0, \[x0, #-1, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_u32_m1, svuint32_t, uint32_t,
+ svst1wq_vnum_u32 (p0, x0, -1, z0),
+ svst1wq_vnum (p0, x0, -1, z0))
+
+/*
+** st1wq_vnum_u32_m8:
+** st1w {z0\.q}, p0, \[x0, #-8, mul vl\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_u32_m8, svuint32_t, uint32_t,
+ svst1wq_vnum_u32 (p0, x0, -8, z0),
+ svst1wq_vnum (p0, x0, -8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st1wq_vnum_u32_m9:
+** decw x0, all, mul #9
+** st1w {z0\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_u32_m9, svuint32_t, uint32_t,
+ svst1wq_vnum_u32 (p0, x0, -9, z0),
+ svst1wq_vnum (p0, x0, -9, z0))
+
+/* Using MUL to calculate an index would also be OK. */
+/*
+** st1wq_vnum_u32_x1:
+** cntw (x[0-9]+)
+** madd (x[0-9]+), (x1, \1|\1, x1), x0
+** st1w {z0\.q}, p0, \[\2\]
+** ret
+*/
+TEST_STORE (st1wq_vnum_u32_x1, svuint32_t, uint32_t,
+ svst1wq_vnum_u32 (p0, x0, x1, z0),
+ svst1wq_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_bf16.c
new file mode 100644
index 0000000..bfbaed1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_bf16.c
@@ -0,0 +1,239 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st2q_bf16_base:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_bf16_base, svbfloat16x2_t, bfloat16_t,
+ svst2q_bf16 (p0, x0, z0),
+ svst2q (p0, x0, z0))
+
+/*
+** st2q_bf16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_bf16_index, svbfloat16x2_t, bfloat16_t,
+ svst2q_bf16 (p0, x0 + x1, z0),
+ svst2q (p0, x0 + x1, z0))
+
+/*
+** st2q_bf16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_bf16_index2, svbfloat16x2_t, bfloat16_t,
+ svst2q_bf16 (p0, x0 + x1 * 2, z0),
+ svst2q (p0, x0 + x1 * 2, z0))
+
+/*
+** st2q_bf16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_bf16_index4, svbfloat16x2_t, bfloat16_t,
+ svst2q_bf16 (p0, x0 + x1 * 4, z0),
+ svst2q (p0, x0 + x1 * 4, z0))
+
+/*
+** st2q_bf16_index8:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st2q_bf16_index8, svbfloat16x2_t, bfloat16_t,
+ svst2q_bf16 (p0, x0 + x1 * 8, z0),
+ svst2q (p0, x0 + x1 * 8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_bf16_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_bf16_1, svbfloat16x2_t, bfloat16_t,
+ svst2q_bf16 (p0, x0 + svcnth (), z0),
+ svst2q (p0, x0 + svcnth (), z0))
+
+/*
+** st2q_bf16_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_bf16_2, svbfloat16x2_t, bfloat16_t,
+ svst2q_bf16 (p0, x0 + svcnth () * 2, z0),
+ svst2q (p0, x0 + svcnth () * 2, z0))
+
+/*
+** st2q_bf16_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_bf16_14, svbfloat16x2_t, bfloat16_t,
+ svst2q_bf16 (p0, x0 + svcnth () * 14, z0),
+ svst2q (p0, x0 + svcnth () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_bf16_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_bf16_16, svbfloat16x2_t, bfloat16_t,
+ svst2q_bf16 (p0, x0 + svcnth () * 16, z0),
+ svst2q (p0, x0 + svcnth () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_bf16_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_bf16_m1, svbfloat16x2_t, bfloat16_t,
+ svst2q_bf16 (p0, x0 - svcnth (), z0),
+ svst2q (p0, x0 - svcnth (), z0))
+
+/*
+** st2q_bf16_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_bf16_m2, svbfloat16x2_t, bfloat16_t,
+ svst2q_bf16 (p0, x0 - svcnth () * 2, z0),
+ svst2q (p0, x0 - svcnth () * 2, z0))
+
+/*
+** st2q_bf16_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_bf16_m16, svbfloat16x2_t, bfloat16_t,
+ svst2q_bf16 (p0, x0 - svcnth () * 16, z0),
+ svst2q (p0, x0 - svcnth () * 16, z0))
+
+/*
+** st2q_bf16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_bf16_m18, svbfloat16x2_t, bfloat16_t,
+ svst2q_bf16 (p0, x0 - svcnth () * 18, z0),
+ svst2q (p0, x0 - svcnth () * 18, z0))
+
+/*
+** st2q_vnum_bf16_0:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_bf16_0, svbfloat16x2_t, bfloat16_t,
+ svst2q_vnum_bf16 (p0, x0, 0, z0),
+ svst2q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_bf16_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_bf16_1, svbfloat16x2_t, bfloat16_t,
+ svst2q_vnum_bf16 (p0, x0, 1, z0),
+ svst2q_vnum (p0, x0, 1, z0))
+
+/*
+** st2q_vnum_bf16_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_bf16_2, svbfloat16x2_t, bfloat16_t,
+ svst2q_vnum_bf16 (p0, x0, 2, z0),
+ svst2q_vnum (p0, x0, 2, z0))
+
+/*
+** st2q_vnum_bf16_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_bf16_14, svbfloat16x2_t, bfloat16_t,
+ svst2q_vnum_bf16 (p0, x0, 14, z0),
+ svst2q_vnum (p0, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_bf16_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_bf16_16, svbfloat16x2_t, bfloat16_t,
+ svst2q_vnum_bf16 (p0, x0, 16, z0),
+ svst2q_vnum (p0, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_bf16_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_bf16_m1, svbfloat16x2_t, bfloat16_t,
+ svst2q_vnum_bf16 (p0, x0, -1, z0),
+ svst2q_vnum (p0, x0, -1, z0))
+
+/*
+** st2q_vnum_bf16_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_bf16_m2, svbfloat16x2_t, bfloat16_t,
+ svst2q_vnum_bf16 (p0, x0, -2, z0),
+ svst2q_vnum (p0, x0, -2, z0))
+
+/*
+** st2q_vnum_bf16_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_bf16_m16, svbfloat16x2_t, bfloat16_t,
+ svst2q_vnum_bf16 (p0, x0, -16, z0),
+ svst2q_vnum (p0, x0, -16, z0))
+
+/*
+** st2q_vnum_bf16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_vnum_bf16_m18, svbfloat16x2_t, bfloat16_t,
+ svst2q_vnum_bf16 (p0, x0, -18, z0),
+ svst2q_vnum (p0, x0, -18, z0))
+
+/*
+** st2q_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st2q_vnum_bf16_x1, svbfloat16x2_t, bfloat16_t,
+ svst2q_vnum_bf16 (p0, x0, x1, z0),
+ svst2q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_f16.c
new file mode 100644
index 0000000..d742407
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_f16.c
@@ -0,0 +1,239 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st2q_f16_base:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_f16_base, svfloat16x2_t, float16_t,
+ svst2q_f16 (p0, x0, z0),
+ svst2q (p0, x0, z0))
+
+/*
+** st2q_f16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_f16_index, svfloat16x2_t, float16_t,
+ svst2q_f16 (p0, x0 + x1, z0),
+ svst2q (p0, x0 + x1, z0))
+
+/*
+** st2q_f16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_f16_index2, svfloat16x2_t, float16_t,
+ svst2q_f16 (p0, x0 + x1 * 2, z0),
+ svst2q (p0, x0 + x1 * 2, z0))
+
+/*
+** st2q_f16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_f16_index4, svfloat16x2_t, float16_t,
+ svst2q_f16 (p0, x0 + x1 * 4, z0),
+ svst2q (p0, x0 + x1 * 4, z0))
+
+/*
+** st2q_f16_index8:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st2q_f16_index8, svfloat16x2_t, float16_t,
+ svst2q_f16 (p0, x0 + x1 * 8, z0),
+ svst2q (p0, x0 + x1 * 8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_f16_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_f16_1, svfloat16x2_t, float16_t,
+ svst2q_f16 (p0, x0 + svcnth (), z0),
+ svst2q (p0, x0 + svcnth (), z0))
+
+/*
+** st2q_f16_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_f16_2, svfloat16x2_t, float16_t,
+ svst2q_f16 (p0, x0 + svcnth () * 2, z0),
+ svst2q (p0, x0 + svcnth () * 2, z0))
+
+/*
+** st2q_f16_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_f16_14, svfloat16x2_t, float16_t,
+ svst2q_f16 (p0, x0 + svcnth () * 14, z0),
+ svst2q (p0, x0 + svcnth () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_f16_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_f16_16, svfloat16x2_t, float16_t,
+ svst2q_f16 (p0, x0 + svcnth () * 16, z0),
+ svst2q (p0, x0 + svcnth () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_f16_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_f16_m1, svfloat16x2_t, float16_t,
+ svst2q_f16 (p0, x0 - svcnth (), z0),
+ svst2q (p0, x0 - svcnth (), z0))
+
+/*
+** st2q_f16_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_f16_m2, svfloat16x2_t, float16_t,
+ svst2q_f16 (p0, x0 - svcnth () * 2, z0),
+ svst2q (p0, x0 - svcnth () * 2, z0))
+
+/*
+** st2q_f16_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_f16_m16, svfloat16x2_t, float16_t,
+ svst2q_f16 (p0, x0 - svcnth () * 16, z0),
+ svst2q (p0, x0 - svcnth () * 16, z0))
+
+/*
+** st2q_f16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_f16_m18, svfloat16x2_t, float16_t,
+ svst2q_f16 (p0, x0 - svcnth () * 18, z0),
+ svst2q (p0, x0 - svcnth () * 18, z0))
+
+/*
+** st2q_vnum_f16_0:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f16_0, svfloat16x2_t, float16_t,
+ svst2q_vnum_f16 (p0, x0, 0, z0),
+ svst2q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_f16_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f16_1, svfloat16x2_t, float16_t,
+ svst2q_vnum_f16 (p0, x0, 1, z0),
+ svst2q_vnum (p0, x0, 1, z0))
+
+/*
+** st2q_vnum_f16_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f16_2, svfloat16x2_t, float16_t,
+ svst2q_vnum_f16 (p0, x0, 2, z0),
+ svst2q_vnum (p0, x0, 2, z0))
+
+/*
+** st2q_vnum_f16_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f16_14, svfloat16x2_t, float16_t,
+ svst2q_vnum_f16 (p0, x0, 14, z0),
+ svst2q_vnum (p0, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_f16_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f16_16, svfloat16x2_t, float16_t,
+ svst2q_vnum_f16 (p0, x0, 16, z0),
+ svst2q_vnum (p0, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_f16_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f16_m1, svfloat16x2_t, float16_t,
+ svst2q_vnum_f16 (p0, x0, -1, z0),
+ svst2q_vnum (p0, x0, -1, z0))
+
+/*
+** st2q_vnum_f16_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f16_m2, svfloat16x2_t, float16_t,
+ svst2q_vnum_f16 (p0, x0, -2, z0),
+ svst2q_vnum (p0, x0, -2, z0))
+
+/*
+** st2q_vnum_f16_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f16_m16, svfloat16x2_t, float16_t,
+ svst2q_vnum_f16 (p0, x0, -16, z0),
+ svst2q_vnum (p0, x0, -16, z0))
+
+/*
+** st2q_vnum_f16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f16_m18, svfloat16x2_t, float16_t,
+ svst2q_vnum_f16 (p0, x0, -18, z0),
+ svst2q_vnum (p0, x0, -18, z0))
+
+/*
+** st2q_vnum_f16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st2q_vnum_f16_x1, svfloat16x2_t, float16_t,
+ svst2q_vnum_f16 (p0, x0, x1, z0),
+ svst2q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_f32.c
new file mode 100644
index 0000000..6e47fe3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_f32.c
@@ -0,0 +1,229 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st2q_f32_base:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_f32_base, svfloat32x2_t, float32_t,
+ svst2q_f32 (p0, x0, z0),
+ svst2q (p0, x0, z0))
+
+/*
+** st2q_f32_index:
+** add (x[0-9]), x0, x1, lsl #?2
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_f32_index, svfloat32x2_t, float32_t,
+ svst2q_f32 (p0, x0 + x1, z0),
+ svst2q (p0, x0 + x1, z0))
+
+/*
+** st2q_f32_index2:
+** add (x[0-9]), x0, x1, lsl #?3
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_f32_index2, svfloat32x2_t, float32_t,
+ svst2q_f32 (p0, x0 + x1 * 2, z0),
+ svst2q (p0, x0 + x1 * 2, z0))
+
+/*
+** st2q_f32_index4:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st2q_f32_index4, svfloat32x2_t, float32_t,
+ svst2q_f32 (p0, x0 + x1 * 4, z0),
+ svst2q (p0, x0 + x1 * 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_f32_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_f32_1, svfloat32x2_t, float32_t,
+ svst2q_f32 (p0, x0 + svcntw (), z0),
+ svst2q (p0, x0 + svcntw (), z0))
+
+/*
+** st2q_f32_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_f32_2, svfloat32x2_t, float32_t,
+ svst2q_f32 (p0, x0 + svcntw () * 2, z0),
+ svst2q (p0, x0 + svcntw () * 2, z0))
+
+/*
+** st2q_f32_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_f32_14, svfloat32x2_t, float32_t,
+ svst2q_f32 (p0, x0 + svcntw () * 14, z0),
+ svst2q (p0, x0 + svcntw () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_f32_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_f32_16, svfloat32x2_t, float32_t,
+ svst2q_f32 (p0, x0 + svcntw () * 16, z0),
+ svst2q (p0, x0 + svcntw () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_f32_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_f32_m1, svfloat32x2_t, float32_t,
+ svst2q_f32 (p0, x0 - svcntw (), z0),
+ svst2q (p0, x0 - svcntw (), z0))
+
+/*
+** st2q_f32_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_f32_m2, svfloat32x2_t, float32_t,
+ svst2q_f32 (p0, x0 - svcntw () * 2, z0),
+ svst2q (p0, x0 - svcntw () * 2, z0))
+
+/*
+** st2q_f32_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_f32_m16, svfloat32x2_t, float32_t,
+ svst2q_f32 (p0, x0 - svcntw () * 16, z0),
+ svst2q (p0, x0 - svcntw () * 16, z0))
+
+/*
+** st2q_f32_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_f32_m18, svfloat32x2_t, float32_t,
+ svst2q_f32 (p0, x0 - svcntw () * 18, z0),
+ svst2q (p0, x0 - svcntw () * 18, z0))
+
+/*
+** st2q_vnum_f32_0:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f32_0, svfloat32x2_t, float32_t,
+ svst2q_vnum_f32 (p0, x0, 0, z0),
+ svst2q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_f32_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f32_1, svfloat32x2_t, float32_t,
+ svst2q_vnum_f32 (p0, x0, 1, z0),
+ svst2q_vnum (p0, x0, 1, z0))
+
+/*
+** st2q_vnum_f32_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f32_2, svfloat32x2_t, float32_t,
+ svst2q_vnum_f32 (p0, x0, 2, z0),
+ svst2q_vnum (p0, x0, 2, z0))
+
+/*
+** st2q_vnum_f32_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f32_14, svfloat32x2_t, float32_t,
+ svst2q_vnum_f32 (p0, x0, 14, z0),
+ svst2q_vnum (p0, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_f32_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f32_16, svfloat32x2_t, float32_t,
+ svst2q_vnum_f32 (p0, x0, 16, z0),
+ svst2q_vnum (p0, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_f32_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f32_m1, svfloat32x2_t, float32_t,
+ svst2q_vnum_f32 (p0, x0, -1, z0),
+ svst2q_vnum (p0, x0, -1, z0))
+
+/*
+** st2q_vnum_f32_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f32_m2, svfloat32x2_t, float32_t,
+ svst2q_vnum_f32 (p0, x0, -2, z0),
+ svst2q_vnum (p0, x0, -2, z0))
+
+/*
+** st2q_vnum_f32_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f32_m16, svfloat32x2_t, float32_t,
+ svst2q_vnum_f32 (p0, x0, -16, z0),
+ svst2q_vnum (p0, x0, -16, z0))
+
+/*
+** st2q_vnum_f32_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f32_m18, svfloat32x2_t, float32_t,
+ svst2q_vnum_f32 (p0, x0, -18, z0),
+ svst2q_vnum (p0, x0, -18, z0))
+
+/*
+** st2q_vnum_f32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st2q_vnum_f32_x1, svfloat32x2_t, float32_t,
+ svst2q_vnum_f32 (p0, x0, x1, z0),
+ svst2q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_f64.c
new file mode 100644
index 0000000..9ac9c3a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_f64.c
@@ -0,0 +1,219 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st2q_f64_base:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_f64_base, svfloat64x2_t, float64_t,
+ svst2q_f64 (p0, x0, z0),
+ svst2q (p0, x0, z0))
+
+/*
+** st2q_f64_index:
+** add (x[0-9]), x0, x1, lsl #?3
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_f64_index, svfloat64x2_t, float64_t,
+ svst2q_f64 (p0, x0 + x1, z0),
+ svst2q (p0, x0 + x1, z0))
+
+/*
+** st2q_f64_index2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st2q_f64_index2, svfloat64x2_t, float64_t,
+ svst2q_f64 (p0, x0 + x1 * 2, z0),
+ svst2q (p0, x0 + x1 * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_f64_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_f64_1, svfloat64x2_t, float64_t,
+ svst2q_f64 (p0, x0 + svcntd (), z0),
+ svst2q (p0, x0 + svcntd (), z0))
+
+/*
+** st2q_f64_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_f64_2, svfloat64x2_t, float64_t,
+ svst2q_f64 (p0, x0 + svcntd () * 2, z0),
+ svst2q (p0, x0 + svcntd () * 2, z0))
+
+/*
+** st2q_f64_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_f64_14, svfloat64x2_t, float64_t,
+ svst2q_f64 (p0, x0 + svcntd () * 14, z0),
+ svst2q (p0, x0 + svcntd () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_f64_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_f64_16, svfloat64x2_t, float64_t,
+ svst2q_f64 (p0, x0 + svcntd () * 16, z0),
+ svst2q (p0, x0 + svcntd () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_f64_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_f64_m1, svfloat64x2_t, float64_t,
+ svst2q_f64 (p0, x0 - svcntd (), z0),
+ svst2q (p0, x0 - svcntd (), z0))
+
+/*
+** st2q_f64_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_f64_m2, svfloat64x2_t, float64_t,
+ svst2q_f64 (p0, x0 - svcntd () * 2, z0),
+ svst2q (p0, x0 - svcntd () * 2, z0))
+
+/*
+** st2q_f64_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_f64_m16, svfloat64x2_t, float64_t,
+ svst2q_f64 (p0, x0 - svcntd () * 16, z0),
+ svst2q (p0, x0 - svcntd () * 16, z0))
+
+/*
+** st2q_f64_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_f64_m18, svfloat64x2_t, float64_t,
+ svst2q_f64 (p0, x0 - svcntd () * 18, z0),
+ svst2q (p0, x0 - svcntd () * 18, z0))
+
+/*
+** st2q_vnum_f64_0:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f64_0, svfloat64x2_t, float64_t,
+ svst2q_vnum_f64 (p0, x0, 0, z0),
+ svst2q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_f64_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f64_1, svfloat64x2_t, float64_t,
+ svst2q_vnum_f64 (p0, x0, 1, z0),
+ svst2q_vnum (p0, x0, 1, z0))
+
+/*
+** st2q_vnum_f64_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f64_2, svfloat64x2_t, float64_t,
+ svst2q_vnum_f64 (p0, x0, 2, z0),
+ svst2q_vnum (p0, x0, 2, z0))
+
+/*
+** st2q_vnum_f64_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f64_14, svfloat64x2_t, float64_t,
+ svst2q_vnum_f64 (p0, x0, 14, z0),
+ svst2q_vnum (p0, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_f64_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f64_16, svfloat64x2_t, float64_t,
+ svst2q_vnum_f64 (p0, x0, 16, z0),
+ svst2q_vnum (p0, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_f64_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f64_m1, svfloat64x2_t, float64_t,
+ svst2q_vnum_f64 (p0, x0, -1, z0),
+ svst2q_vnum (p0, x0, -1, z0))
+
+/*
+** st2q_vnum_f64_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f64_m2, svfloat64x2_t, float64_t,
+ svst2q_vnum_f64 (p0, x0, -2, z0),
+ svst2q_vnum (p0, x0, -2, z0))
+
+/*
+** st2q_vnum_f64_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f64_m16, svfloat64x2_t, float64_t,
+ svst2q_vnum_f64 (p0, x0, -16, z0),
+ svst2q_vnum (p0, x0, -16, z0))
+
+/*
+** st2q_vnum_f64_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_vnum_f64_m18, svfloat64x2_t, float64_t,
+ svst2q_vnum_f64 (p0, x0, -18, z0),
+ svst2q_vnum (p0, x0, -18, z0))
+
+/*
+** st2q_vnum_f64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st2q_vnum_f64_x1, svfloat64x2_t, float64_t,
+ svst2q_vnum_f64 (p0, x0, x1, z0),
+ svst2q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s16.c
new file mode 100644
index 0000000..fe79004
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s16.c
@@ -0,0 +1,239 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st2q_s16_base:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s16_base, svint16x2_t, int16_t,
+ svst2q_s16 (p0, x0, z0),
+ svst2q (p0, x0, z0))
+
+/*
+** st2q_s16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s16_index, svint16x2_t, int16_t,
+ svst2q_s16 (p0, x0 + x1, z0),
+ svst2q (p0, x0 + x1, z0))
+
+/*
+** st2q_s16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s16_index2, svint16x2_t, int16_t,
+ svst2q_s16 (p0, x0 + x1 * 2, z0),
+ svst2q (p0, x0 + x1 * 2, z0))
+
+/*
+** st2q_s16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s16_index4, svint16x2_t, int16_t,
+ svst2q_s16 (p0, x0 + x1 * 4, z0),
+ svst2q (p0, x0 + x1 * 4, z0))
+
+/*
+** st2q_s16_index8:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st2q_s16_index8, svint16x2_t, int16_t,
+ svst2q_s16 (p0, x0 + x1 * 8, z0),
+ svst2q (p0, x0 + x1 * 8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_s16_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s16_1, svint16x2_t, int16_t,
+ svst2q_s16 (p0, x0 + svcnth (), z0),
+ svst2q (p0, x0 + svcnth (), z0))
+
+/*
+** st2q_s16_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s16_2, svint16x2_t, int16_t,
+ svst2q_s16 (p0, x0 + svcnth () * 2, z0),
+ svst2q (p0, x0 + svcnth () * 2, z0))
+
+/*
+** st2q_s16_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s16_14, svint16x2_t, int16_t,
+ svst2q_s16 (p0, x0 + svcnth () * 14, z0),
+ svst2q (p0, x0 + svcnth () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_s16_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s16_16, svint16x2_t, int16_t,
+ svst2q_s16 (p0, x0 + svcnth () * 16, z0),
+ svst2q (p0, x0 + svcnth () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_s16_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s16_m1, svint16x2_t, int16_t,
+ svst2q_s16 (p0, x0 - svcnth (), z0),
+ svst2q (p0, x0 - svcnth (), z0))
+
+/*
+** st2q_s16_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s16_m2, svint16x2_t, int16_t,
+ svst2q_s16 (p0, x0 - svcnth () * 2, z0),
+ svst2q (p0, x0 - svcnth () * 2, z0))
+
+/*
+** st2q_s16_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s16_m16, svint16x2_t, int16_t,
+ svst2q_s16 (p0, x0 - svcnth () * 16, z0),
+ svst2q (p0, x0 - svcnth () * 16, z0))
+
+/*
+** st2q_s16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s16_m18, svint16x2_t, int16_t,
+ svst2q_s16 (p0, x0 - svcnth () * 18, z0),
+ svst2q (p0, x0 - svcnth () * 18, z0))
+
+/*
+** st2q_vnum_s16_0:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s16_0, svint16x2_t, int16_t,
+ svst2q_vnum_s16 (p0, x0, 0, z0),
+ svst2q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_s16_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s16_1, svint16x2_t, int16_t,
+ svst2q_vnum_s16 (p0, x0, 1, z0),
+ svst2q_vnum (p0, x0, 1, z0))
+
+/*
+** st2q_vnum_s16_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s16_2, svint16x2_t, int16_t,
+ svst2q_vnum_s16 (p0, x0, 2, z0),
+ svst2q_vnum (p0, x0, 2, z0))
+
+/*
+** st2q_vnum_s16_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s16_14, svint16x2_t, int16_t,
+ svst2q_vnum_s16 (p0, x0, 14, z0),
+ svst2q_vnum (p0, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_s16_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s16_16, svint16x2_t, int16_t,
+ svst2q_vnum_s16 (p0, x0, 16, z0),
+ svst2q_vnum (p0, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_s16_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s16_m1, svint16x2_t, int16_t,
+ svst2q_vnum_s16 (p0, x0, -1, z0),
+ svst2q_vnum (p0, x0, -1, z0))
+
+/*
+** st2q_vnum_s16_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s16_m2, svint16x2_t, int16_t,
+ svst2q_vnum_s16 (p0, x0, -2, z0),
+ svst2q_vnum (p0, x0, -2, z0))
+
+/*
+** st2q_vnum_s16_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s16_m16, svint16x2_t, int16_t,
+ svst2q_vnum_s16 (p0, x0, -16, z0),
+ svst2q_vnum (p0, x0, -16, z0))
+
+/*
+** st2q_vnum_s16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s16_m18, svint16x2_t, int16_t,
+ svst2q_vnum_s16 (p0, x0, -18, z0),
+ svst2q_vnum (p0, x0, -18, z0))
+
+/*
+** st2q_vnum_s16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st2q_vnum_s16_x1, svint16x2_t, int16_t,
+ svst2q_vnum_s16 (p0, x0, x1, z0),
+ svst2q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s32.c
new file mode 100644
index 0000000..37958f3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s32.c
@@ -0,0 +1,229 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st2q_s32_base:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s32_base, svint32x2_t, int32_t,
+ svst2q_s32 (p0, x0, z0),
+ svst2q (p0, x0, z0))
+
+/*
+** st2q_s32_index:
+** add (x[0-9]), x0, x1, lsl #?2
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s32_index, svint32x2_t, int32_t,
+ svst2q_s32 (p0, x0 + x1, z0),
+ svst2q (p0, x0 + x1, z0))
+
+/*
+** st2q_s32_index2:
+** add (x[0-9]), x0, x1, lsl #?3
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s32_index2, svint32x2_t, int32_t,
+ svst2q_s32 (p0, x0 + x1 * 2, z0),
+ svst2q (p0, x0 + x1 * 2, z0))
+
+/*
+** st2q_s32_index4:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st2q_s32_index4, svint32x2_t, int32_t,
+ svst2q_s32 (p0, x0 + x1 * 4, z0),
+ svst2q (p0, x0 + x1 * 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_s32_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s32_1, svint32x2_t, int32_t,
+ svst2q_s32 (p0, x0 + svcntw (), z0),
+ svst2q (p0, x0 + svcntw (), z0))
+
+/*
+** st2q_s32_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s32_2, svint32x2_t, int32_t,
+ svst2q_s32 (p0, x0 + svcntw () * 2, z0),
+ svst2q (p0, x0 + svcntw () * 2, z0))
+
+/*
+** st2q_s32_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s32_14, svint32x2_t, int32_t,
+ svst2q_s32 (p0, x0 + svcntw () * 14, z0),
+ svst2q (p0, x0 + svcntw () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_s32_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s32_16, svint32x2_t, int32_t,
+ svst2q_s32 (p0, x0 + svcntw () * 16, z0),
+ svst2q (p0, x0 + svcntw () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_s32_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s32_m1, svint32x2_t, int32_t,
+ svst2q_s32 (p0, x0 - svcntw (), z0),
+ svst2q (p0, x0 - svcntw (), z0))
+
+/*
+** st2q_s32_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s32_m2, svint32x2_t, int32_t,
+ svst2q_s32 (p0, x0 - svcntw () * 2, z0),
+ svst2q (p0, x0 - svcntw () * 2, z0))
+
+/*
+** st2q_s32_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s32_m16, svint32x2_t, int32_t,
+ svst2q_s32 (p0, x0 - svcntw () * 16, z0),
+ svst2q (p0, x0 - svcntw () * 16, z0))
+
+/*
+** st2q_s32_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s32_m18, svint32x2_t, int32_t,
+ svst2q_s32 (p0, x0 - svcntw () * 18, z0),
+ svst2q (p0, x0 - svcntw () * 18, z0))
+
+/*
+** st2q_vnum_s32_0:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s32_0, svint32x2_t, int32_t,
+ svst2q_vnum_s32 (p0, x0, 0, z0),
+ svst2q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_s32_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s32_1, svint32x2_t, int32_t,
+ svst2q_vnum_s32 (p0, x0, 1, z0),
+ svst2q_vnum (p0, x0, 1, z0))
+
+/*
+** st2q_vnum_s32_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s32_2, svint32x2_t, int32_t,
+ svst2q_vnum_s32 (p0, x0, 2, z0),
+ svst2q_vnum (p0, x0, 2, z0))
+
+/*
+** st2q_vnum_s32_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s32_14, svint32x2_t, int32_t,
+ svst2q_vnum_s32 (p0, x0, 14, z0),
+ svst2q_vnum (p0, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_s32_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s32_16, svint32x2_t, int32_t,
+ svst2q_vnum_s32 (p0, x0, 16, z0),
+ svst2q_vnum (p0, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_s32_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s32_m1, svint32x2_t, int32_t,
+ svst2q_vnum_s32 (p0, x0, -1, z0),
+ svst2q_vnum (p0, x0, -1, z0))
+
+/*
+** st2q_vnum_s32_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s32_m2, svint32x2_t, int32_t,
+ svst2q_vnum_s32 (p0, x0, -2, z0),
+ svst2q_vnum (p0, x0, -2, z0))
+
+/*
+** st2q_vnum_s32_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s32_m16, svint32x2_t, int32_t,
+ svst2q_vnum_s32 (p0, x0, -16, z0),
+ svst2q_vnum (p0, x0, -16, z0))
+
+/*
+** st2q_vnum_s32_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s32_m18, svint32x2_t, int32_t,
+ svst2q_vnum_s32 (p0, x0, -18, z0),
+ svst2q_vnum (p0, x0, -18, z0))
+
+/*
+** st2q_vnum_s32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st2q_vnum_s32_x1, svint32x2_t, int32_t,
+ svst2q_vnum_s32 (p0, x0, x1, z0),
+ svst2q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s64.c
new file mode 100644
index 0000000..20689ce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s64.c
@@ -0,0 +1,219 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st2q_s64_base:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s64_base, svint64x2_t, int64_t,
+ svst2q_s64 (p0, x0, z0),
+ svst2q (p0, x0, z0))
+
+/*
+** st2q_s64_index:
+** add (x[0-9]), x0, x1, lsl #?3
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s64_index, svint64x2_t, int64_t,
+ svst2q_s64 (p0, x0 + x1, z0),
+ svst2q (p0, x0 + x1, z0))
+
+/*
+** st2q_s64_index2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st2q_s64_index2, svint64x2_t, int64_t,
+ svst2q_s64 (p0, x0 + x1 * 2, z0),
+ svst2q (p0, x0 + x1 * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_s64_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s64_1, svint64x2_t, int64_t,
+ svst2q_s64 (p0, x0 + svcntd (), z0),
+ svst2q (p0, x0 + svcntd (), z0))
+
+/*
+** st2q_s64_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s64_2, svint64x2_t, int64_t,
+ svst2q_s64 (p0, x0 + svcntd () * 2, z0),
+ svst2q (p0, x0 + svcntd () * 2, z0))
+
+/*
+** st2q_s64_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s64_14, svint64x2_t, int64_t,
+ svst2q_s64 (p0, x0 + svcntd () * 14, z0),
+ svst2q (p0, x0 + svcntd () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_s64_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s64_16, svint64x2_t, int64_t,
+ svst2q_s64 (p0, x0 + svcntd () * 16, z0),
+ svst2q (p0, x0 + svcntd () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_s64_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s64_m1, svint64x2_t, int64_t,
+ svst2q_s64 (p0, x0 - svcntd (), z0),
+ svst2q (p0, x0 - svcntd (), z0))
+
+/*
+** st2q_s64_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s64_m2, svint64x2_t, int64_t,
+ svst2q_s64 (p0, x0 - svcntd () * 2, z0),
+ svst2q (p0, x0 - svcntd () * 2, z0))
+
+/*
+** st2q_s64_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s64_m16, svint64x2_t, int64_t,
+ svst2q_s64 (p0, x0 - svcntd () * 16, z0),
+ svst2q (p0, x0 - svcntd () * 16, z0))
+
+/*
+** st2q_s64_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s64_m18, svint64x2_t, int64_t,
+ svst2q_s64 (p0, x0 - svcntd () * 18, z0),
+ svst2q (p0, x0 - svcntd () * 18, z0))
+
+/*
+** st2q_vnum_s64_0:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s64_0, svint64x2_t, int64_t,
+ svst2q_vnum_s64 (p0, x0, 0, z0),
+ svst2q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_s64_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s64_1, svint64x2_t, int64_t,
+ svst2q_vnum_s64 (p0, x0, 1, z0),
+ svst2q_vnum (p0, x0, 1, z0))
+
+/*
+** st2q_vnum_s64_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s64_2, svint64x2_t, int64_t,
+ svst2q_vnum_s64 (p0, x0, 2, z0),
+ svst2q_vnum (p0, x0, 2, z0))
+
+/*
+** st2q_vnum_s64_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s64_14, svint64x2_t, int64_t,
+ svst2q_vnum_s64 (p0, x0, 14, z0),
+ svst2q_vnum (p0, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_s64_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s64_16, svint64x2_t, int64_t,
+ svst2q_vnum_s64 (p0, x0, 16, z0),
+ svst2q_vnum (p0, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_s64_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s64_m1, svint64x2_t, int64_t,
+ svst2q_vnum_s64 (p0, x0, -1, z0),
+ svst2q_vnum (p0, x0, -1, z0))
+
+/*
+** st2q_vnum_s64_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s64_m2, svint64x2_t, int64_t,
+ svst2q_vnum_s64 (p0, x0, -2, z0),
+ svst2q_vnum (p0, x0, -2, z0))
+
+/*
+** st2q_vnum_s64_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s64_m16, svint64x2_t, int64_t,
+ svst2q_vnum_s64 (p0, x0, -16, z0),
+ svst2q_vnum (p0, x0, -16, z0))
+
+/*
+** st2q_vnum_s64_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s64_m18, svint64x2_t, int64_t,
+ svst2q_vnum_s64 (p0, x0, -18, z0),
+ svst2q_vnum (p0, x0, -18, z0))
+
+/*
+** st2q_vnum_s64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st2q_vnum_s64_x1, svint64x2_t, int64_t,
+ svst2q_vnum_s64 (p0, x0, x1, z0),
+ svst2q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s8.c
new file mode 100644
index 0000000..78fe6ca
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_s8.c
@@ -0,0 +1,249 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st2q_s8_base:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s8_base, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0, z0),
+ svst2q (p0, x0, z0))
+
+/*
+** st2q_s8_index:
+** add (x[0-9]), (?:x0, x1|x1, x0)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s8_index, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0 + x1, z0),
+ svst2q (p0, x0 + x1, z0))
+
+/*
+** st2q_s8_index2:
+** add (x[0-9]), x0, x1, lsl #?1
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s8_index2, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0 + x1 * 2, z0),
+ svst2q (p0, x0 + x1 * 2, z0))
+
+/*
+** st2q_s8_index4:
+** add (x[0-9]), x0, x1, lsl #?2
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s8_index4, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0 + x1 * 4, z0),
+ svst2q (p0, x0 + x1 * 4, z0))
+
+/*
+** st2q_s8_index8:
+** add (x[0-9]), x0, x1, lsl #?3
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s8_index8, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0 + x1 * 8, z0),
+ svst2q (p0, x0 + x1 * 8, z0))
+
+/*
+** st2q_s8_index16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st2q_s8_index16, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0 + x1 * 16, z0),
+ svst2q (p0, x0 + x1 * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_s8_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s8_1, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0 + svcntb (), z0),
+ svst2q (p0, x0 + svcntb (), z0))
+
+/*
+** st2q_s8_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s8_2, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0 + svcntb () * 2, z0),
+ svst2q (p0, x0 + svcntb () * 2, z0))
+
+/*
+** st2q_s8_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s8_14, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0 + svcntb () * 14, z0),
+ svst2q (p0, x0 + svcntb () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_s8_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s8_16, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0 + svcntb () * 16, z0),
+ svst2q (p0, x0 + svcntb () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_s8_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_s8_m1, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0 - svcntb (), z0),
+ svst2q (p0, x0 - svcntb (), z0))
+
+/*
+** st2q_s8_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s8_m2, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0 - svcntb () * 2, z0),
+ svst2q (p0, x0 - svcntb () * 2, z0))
+
+/*
+** st2q_s8_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_s8_m16, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0 - svcntb () * 16, z0),
+ svst2q (p0, x0 - svcntb () * 16, z0))
+
+/*
+** st2q_s8_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_s8_m18, svint8x2_t, int8_t,
+ svst2q_s8 (p0, x0 - svcntb () * 18, z0),
+ svst2q (p0, x0 - svcntb () * 18, z0))
+
+/*
+** st2q_vnum_s8_0:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s8_0, svint8x2_t, int8_t,
+ svst2q_vnum_s8 (p0, x0, 0, z0),
+ svst2q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_s8_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s8_1, svint8x2_t, int8_t,
+ svst2q_vnum_s8 (p0, x0, 1, z0),
+ svst2q_vnum (p0, x0, 1, z0))
+
+/*
+** st2q_vnum_s8_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s8_2, svint8x2_t, int8_t,
+ svst2q_vnum_s8 (p0, x0, 2, z0),
+ svst2q_vnum (p0, x0, 2, z0))
+
+/*
+** st2q_vnum_s8_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s8_14, svint8x2_t, int8_t,
+ svst2q_vnum_s8 (p0, x0, 14, z0),
+ svst2q_vnum (p0, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_s8_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s8_16, svint8x2_t, int8_t,
+ svst2q_vnum_s8 (p0, x0, 16, z0),
+ svst2q_vnum (p0, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_s8_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s8_m1, svint8x2_t, int8_t,
+ svst2q_vnum_s8 (p0, x0, -1, z0),
+ svst2q_vnum (p0, x0, -1, z0))
+
+/*
+** st2q_vnum_s8_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s8_m2, svint8x2_t, int8_t,
+ svst2q_vnum_s8 (p0, x0, -2, z0),
+ svst2q_vnum (p0, x0, -2, z0))
+
+/*
+** st2q_vnum_s8_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s8_m16, svint8x2_t, int8_t,
+ svst2q_vnum_s8 (p0, x0, -16, z0),
+ svst2q_vnum (p0, x0, -16, z0))
+
+/*
+** st2q_vnum_s8_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_vnum_s8_m18, svint8x2_t, int8_t,
+ svst2q_vnum_s8 (p0, x0, -18, z0),
+ svst2q_vnum (p0, x0, -18, z0))
+
+/*
+** st2q_vnum_s8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st2q_vnum_s8_x1, svint8x2_t, int8_t,
+ svst2q_vnum_s8 (p0, x0, x1, z0),
+ svst2q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u16.c
new file mode 100644
index 0000000..1e5421c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u16.c
@@ -0,0 +1,239 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st2q_u16_base:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u16_base, svuint16x2_t, uint16_t,
+ svst2q_u16 (p0, x0, z0),
+ svst2q (p0, x0, z0))
+
+/*
+** st2q_u16_index:
+** add (x[0-9]+), x0, x1, lsl #?1
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u16_index, svuint16x2_t, uint16_t,
+ svst2q_u16 (p0, x0 + x1, z0),
+ svst2q (p0, x0 + x1, z0))
+
+/*
+** st2q_u16_index2:
+** add (x[0-9]+), x0, x1, lsl #?2
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u16_index2, svuint16x2_t, uint16_t,
+ svst2q_u16 (p0, x0 + x1 * 2, z0),
+ svst2q (p0, x0 + x1 * 2, z0))
+
+/*
+** st2q_u16_index4:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u16_index4, svuint16x2_t, uint16_t,
+ svst2q_u16 (p0, x0 + x1 * 4, z0),
+ svst2q (p0, x0 + x1 * 4, z0))
+
+/*
+** st2q_u16_index8:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st2q_u16_index8, svuint16x2_t, uint16_t,
+ svst2q_u16 (p0, x0 + x1 * 8, z0),
+ svst2q (p0, x0 + x1 * 8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_u16_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u16_1, svuint16x2_t, uint16_t,
+ svst2q_u16 (p0, x0 + svcnth (), z0),
+ svst2q (p0, x0 + svcnth (), z0))
+
+/*
+** st2q_u16_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u16_2, svuint16x2_t, uint16_t,
+ svst2q_u16 (p0, x0 + svcnth () * 2, z0),
+ svst2q (p0, x0 + svcnth () * 2, z0))
+
+/*
+** st2q_u16_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u16_14, svuint16x2_t, uint16_t,
+ svst2q_u16 (p0, x0 + svcnth () * 14, z0),
+ svst2q (p0, x0 + svcnth () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_u16_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u16_16, svuint16x2_t, uint16_t,
+ svst2q_u16 (p0, x0 + svcnth () * 16, z0),
+ svst2q (p0, x0 + svcnth () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_u16_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u16_m1, svuint16x2_t, uint16_t,
+ svst2q_u16 (p0, x0 - svcnth (), z0),
+ svst2q (p0, x0 - svcnth (), z0))
+
+/*
+** st2q_u16_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u16_m2, svuint16x2_t, uint16_t,
+ svst2q_u16 (p0, x0 - svcnth () * 2, z0),
+ svst2q (p0, x0 - svcnth () * 2, z0))
+
+/*
+** st2q_u16_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u16_m16, svuint16x2_t, uint16_t,
+ svst2q_u16 (p0, x0 - svcnth () * 16, z0),
+ svst2q (p0, x0 - svcnth () * 16, z0))
+
+/*
+** st2q_u16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u16_m18, svuint16x2_t, uint16_t,
+ svst2q_u16 (p0, x0 - svcnth () * 18, z0),
+ svst2q (p0, x0 - svcnth () * 18, z0))
+
+/*
+** st2q_vnum_u16_0:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u16_0, svuint16x2_t, uint16_t,
+ svst2q_vnum_u16 (p0, x0, 0, z0),
+ svst2q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_u16_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u16_1, svuint16x2_t, uint16_t,
+ svst2q_vnum_u16 (p0, x0, 1, z0),
+ svst2q_vnum (p0, x0, 1, z0))
+
+/*
+** st2q_vnum_u16_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u16_2, svuint16x2_t, uint16_t,
+ svst2q_vnum_u16 (p0, x0, 2, z0),
+ svst2q_vnum (p0, x0, 2, z0))
+
+/*
+** st2q_vnum_u16_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u16_14, svuint16x2_t, uint16_t,
+ svst2q_vnum_u16 (p0, x0, 14, z0),
+ svst2q_vnum (p0, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_u16_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u16_16, svuint16x2_t, uint16_t,
+ svst2q_vnum_u16 (p0, x0, 16, z0),
+ svst2q_vnum (p0, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_u16_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u16_m1, svuint16x2_t, uint16_t,
+ svst2q_vnum_u16 (p0, x0, -1, z0),
+ svst2q_vnum (p0, x0, -1, z0))
+
+/*
+** st2q_vnum_u16_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u16_m2, svuint16x2_t, uint16_t,
+ svst2q_vnum_u16 (p0, x0, -2, z0),
+ svst2q_vnum (p0, x0, -2, z0))
+
+/*
+** st2q_vnum_u16_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u16_m16, svuint16x2_t, uint16_t,
+ svst2q_vnum_u16 (p0, x0, -16, z0),
+ svst2q_vnum (p0, x0, -16, z0))
+
+/*
+** st2q_vnum_u16_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u16_m18, svuint16x2_t, uint16_t,
+ svst2q_vnum_u16 (p0, x0, -18, z0),
+ svst2q_vnum (p0, x0, -18, z0))
+
+/*
+** st2q_vnum_u16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st2q_vnum_u16_x1, svuint16x2_t, uint16_t,
+ svst2q_vnum_u16 (p0, x0, x1, z0),
+ svst2q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u32.c
new file mode 100644
index 0000000..689bff5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u32.c
@@ -0,0 +1,229 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st2q_u32_base:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u32_base, svuint32x2_t, uint32_t,
+ svst2q_u32 (p0, x0, z0),
+ svst2q (p0, x0, z0))
+
+/*
+** st2q_u32_index:
+** add (x[0-9]+), x0, x1, lsl #?2
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u32_index, svuint32x2_t, uint32_t,
+ svst2q_u32 (p0, x0 + x1, z0),
+ svst2q (p0, x0 + x1, z0))
+
+/*
+** st2q_u32_index2:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u32_index2, svuint32x2_t, uint32_t,
+ svst2q_u32 (p0, x0 + x1 * 2, z0),
+ svst2q (p0, x0 + x1 * 2, z0))
+
+/*
+** st2q_u32_index4:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st2q_u32_index4, svuint32x2_t, uint32_t,
+ svst2q_u32 (p0, x0 + x1 * 4, z0),
+ svst2q (p0, x0 + x1 * 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_u32_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u32_1, svuint32x2_t, uint32_t,
+ svst2q_u32 (p0, x0 + svcntw (), z0),
+ svst2q (p0, x0 + svcntw (), z0))
+
+/*
+** st2q_u32_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u32_2, svuint32x2_t, uint32_t,
+ svst2q_u32 (p0, x0 + svcntw () * 2, z0),
+ svst2q (p0, x0 + svcntw () * 2, z0))
+
+/*
+** st2q_u32_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u32_14, svuint32x2_t, uint32_t,
+ svst2q_u32 (p0, x0 + svcntw () * 14, z0),
+ svst2q (p0, x0 + svcntw () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_u32_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u32_16, svuint32x2_t, uint32_t,
+ svst2q_u32 (p0, x0 + svcntw () * 16, z0),
+ svst2q (p0, x0 + svcntw () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_u32_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u32_m1, svuint32x2_t, uint32_t,
+ svst2q_u32 (p0, x0 - svcntw (), z0),
+ svst2q (p0, x0 - svcntw (), z0))
+
+/*
+** st2q_u32_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u32_m2, svuint32x2_t, uint32_t,
+ svst2q_u32 (p0, x0 - svcntw () * 2, z0),
+ svst2q (p0, x0 - svcntw () * 2, z0))
+
+/*
+** st2q_u32_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u32_m16, svuint32x2_t, uint32_t,
+ svst2q_u32 (p0, x0 - svcntw () * 16, z0),
+ svst2q (p0, x0 - svcntw () * 16, z0))
+
+/*
+** st2q_u32_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u32_m18, svuint32x2_t, uint32_t,
+ svst2q_u32 (p0, x0 - svcntw () * 18, z0),
+ svst2q (p0, x0 - svcntw () * 18, z0))
+
+/*
+** st2q_vnum_u32_0:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u32_0, svuint32x2_t, uint32_t,
+ svst2q_vnum_u32 (p0, x0, 0, z0),
+ svst2q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_u32_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u32_1, svuint32x2_t, uint32_t,
+ svst2q_vnum_u32 (p0, x0, 1, z0),
+ svst2q_vnum (p0, x0, 1, z0))
+
+/*
+** st2q_vnum_u32_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u32_2, svuint32x2_t, uint32_t,
+ svst2q_vnum_u32 (p0, x0, 2, z0),
+ svst2q_vnum (p0, x0, 2, z0))
+
+/*
+** st2q_vnum_u32_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u32_14, svuint32x2_t, uint32_t,
+ svst2q_vnum_u32 (p0, x0, 14, z0),
+ svst2q_vnum (p0, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_u32_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u32_16, svuint32x2_t, uint32_t,
+ svst2q_vnum_u32 (p0, x0, 16, z0),
+ svst2q_vnum (p0, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_u32_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u32_m1, svuint32x2_t, uint32_t,
+ svst2q_vnum_u32 (p0, x0, -1, z0),
+ svst2q_vnum (p0, x0, -1, z0))
+
+/*
+** st2q_vnum_u32_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u32_m2, svuint32x2_t, uint32_t,
+ svst2q_vnum_u32 (p0, x0, -2, z0),
+ svst2q_vnum (p0, x0, -2, z0))
+
+/*
+** st2q_vnum_u32_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u32_m16, svuint32x2_t, uint32_t,
+ svst2q_vnum_u32 (p0, x0, -16, z0),
+ svst2q_vnum (p0, x0, -16, z0))
+
+/*
+** st2q_vnum_u32_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u32_m18, svuint32x2_t, uint32_t,
+ svst2q_vnum_u32 (p0, x0, -18, z0),
+ svst2q_vnum (p0, x0, -18, z0))
+
+/*
+** st2q_vnum_u32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st2q_vnum_u32_x1, svuint32x2_t, uint32_t,
+ svst2q_vnum_u32 (p0, x0, x1, z0),
+ svst2q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u64.c
new file mode 100644
index 0000000..33bf401
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u64.c
@@ -0,0 +1,219 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st2q_u64_base:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u64_base, svuint64x2_t, uint64_t,
+ svst2q_u64 (p0, x0, z0),
+ svst2q (p0, x0, z0))
+
+/*
+** st2q_u64_index:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u64_index, svuint64x2_t, uint64_t,
+ svst2q_u64 (p0, x0 + x1, z0),
+ svst2q (p0, x0 + x1, z0))
+
+/*
+** st2q_u64_index2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st2q_u64_index2, svuint64x2_t, uint64_t,
+ svst2q_u64 (p0, x0 + x1 * 2, z0),
+ svst2q (p0, x0 + x1 * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_u64_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u64_1, svuint64x2_t, uint64_t,
+ svst2q_u64 (p0, x0 + svcntd (), z0),
+ svst2q (p0, x0 + svcntd (), z0))
+
+/*
+** st2q_u64_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u64_2, svuint64x2_t, uint64_t,
+ svst2q_u64 (p0, x0 + svcntd () * 2, z0),
+ svst2q (p0, x0 + svcntd () * 2, z0))
+
+/*
+** st2q_u64_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u64_14, svuint64x2_t, uint64_t,
+ svst2q_u64 (p0, x0 + svcntd () * 14, z0),
+ svst2q (p0, x0 + svcntd () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_u64_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u64_16, svuint64x2_t, uint64_t,
+ svst2q_u64 (p0, x0 + svcntd () * 16, z0),
+ svst2q (p0, x0 + svcntd () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_u64_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u64_m1, svuint64x2_t, uint64_t,
+ svst2q_u64 (p0, x0 - svcntd (), z0),
+ svst2q (p0, x0 - svcntd (), z0))
+
+/*
+** st2q_u64_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u64_m2, svuint64x2_t, uint64_t,
+ svst2q_u64 (p0, x0 - svcntd () * 2, z0),
+ svst2q (p0, x0 - svcntd () * 2, z0))
+
+/*
+** st2q_u64_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u64_m16, svuint64x2_t, uint64_t,
+ svst2q_u64 (p0, x0 - svcntd () * 16, z0),
+ svst2q (p0, x0 - svcntd () * 16, z0))
+
+/*
+** st2q_u64_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u64_m18, svuint64x2_t, uint64_t,
+ svst2q_u64 (p0, x0 - svcntd () * 18, z0),
+ svst2q (p0, x0 - svcntd () * 18, z0))
+
+/*
+** st2q_vnum_u64_0:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u64_0, svuint64x2_t, uint64_t,
+ svst2q_vnum_u64 (p0, x0, 0, z0),
+ svst2q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_u64_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u64_1, svuint64x2_t, uint64_t,
+ svst2q_vnum_u64 (p0, x0, 1, z0),
+ svst2q_vnum (p0, x0, 1, z0))
+
+/*
+** st2q_vnum_u64_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u64_2, svuint64x2_t, uint64_t,
+ svst2q_vnum_u64 (p0, x0, 2, z0),
+ svst2q_vnum (p0, x0, 2, z0))
+
+/*
+** st2q_vnum_u64_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u64_14, svuint64x2_t, uint64_t,
+ svst2q_vnum_u64 (p0, x0, 14, z0),
+ svst2q_vnum (p0, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_u64_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u64_16, svuint64x2_t, uint64_t,
+ svst2q_vnum_u64 (p0, x0, 16, z0),
+ svst2q_vnum (p0, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_u64_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u64_m1, svuint64x2_t, uint64_t,
+ svst2q_vnum_u64 (p0, x0, -1, z0),
+ svst2q_vnum (p0, x0, -1, z0))
+
+/*
+** st2q_vnum_u64_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u64_m2, svuint64x2_t, uint64_t,
+ svst2q_vnum_u64 (p0, x0, -2, z0),
+ svst2q_vnum (p0, x0, -2, z0))
+
+/*
+** st2q_vnum_u64_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u64_m16, svuint64x2_t, uint64_t,
+ svst2q_vnum_u64 (p0, x0, -16, z0),
+ svst2q_vnum (p0, x0, -16, z0))
+
+/*
+** st2q_vnum_u64_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u64_m18, svuint64x2_t, uint64_t,
+ svst2q_vnum_u64 (p0, x0, -18, z0),
+ svst2q_vnum (p0, x0, -18, z0))
+
+/*
+** st2q_vnum_u64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st2q_vnum_u64_x1, svuint64x2_t, uint64_t,
+ svst2q_vnum_u64 (p0, x0, x1, z0),
+ svst2q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u8.c
new file mode 100644
index 0000000..0985a4f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st2q_u8.c
@@ -0,0 +1,249 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st2q_u8_base:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u8_base, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0, z0),
+ svst2q (p0, x0, z0))
+
+/*
+** st2q_u8_index:
+** add (x[0-9]+), (?:x0, x1|x1, x0)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u8_index, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0 + x1, z0),
+ svst2q (p0, x0 + x1, z0))
+
+/*
+** st2q_u8_index2:
+** add (x[0-9]+), x0, x1, lsl #?1
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u8_index2, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0 + x1 * 2, z0),
+ svst2q (p0, x0 + x1 * 2, z0))
+
+/*
+** st2q_u8_index4:
+** add (x[0-9]+), x0, x1, lsl #?2
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u8_index4, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0 + x1 * 4, z0),
+ svst2q (p0, x0 + x1 * 4, z0))
+
+/*
+** st2q_u8_index8:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u8_index8, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0 + x1 * 8, z0),
+ svst2q (p0, x0 + x1 * 8, z0))
+
+/*
+** st2q_u8_index16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st2q_u8_index16, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0 + x1 * 16, z0),
+ svst2q (p0, x0 + x1 * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_u8_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u8_1, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0 + svcntb (), z0),
+ svst2q (p0, x0 + svcntb (), z0))
+
+/*
+** st2q_u8_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u8_2, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0 + svcntb () * 2, z0),
+ svst2q (p0, x0 + svcntb () * 2, z0))
+
+/*
+** st2q_u8_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u8_14, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0 + svcntb () * 14, z0),
+ svst2q (p0, x0 + svcntb () * 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_u8_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u8_16, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0 + svcntb () * 16, z0),
+ svst2q (p0, x0 + svcntb () * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_u8_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_u8_m1, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0 - svcntb (), z0),
+ svst2q (p0, x0 - svcntb (), z0))
+
+/*
+** st2q_u8_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u8_m2, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0 - svcntb () * 2, z0),
+ svst2q (p0, x0 - svcntb () * 2, z0))
+
+/*
+** st2q_u8_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_u8_m16, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0 - svcntb () * 16, z0),
+ svst2q (p0, x0 - svcntb () * 16, z0))
+
+/*
+** st2q_u8_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_u8_m18, svuint8x2_t, uint8_t,
+ svst2q_u8 (p0, x0 - svcntb () * 18, z0),
+ svst2q (p0, x0 - svcntb () * 18, z0))
+
+/*
+** st2q_vnum_u8_0:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u8_0, svuint8x2_t, uint8_t,
+ svst2q_vnum_u8 (p0, x0, 0, z0),
+ svst2q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_u8_1:
+** incb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u8_1, svuint8x2_t, uint8_t,
+ svst2q_vnum_u8 (p0, x0, 1, z0),
+ svst2q_vnum (p0, x0, 1, z0))
+
+/*
+** st2q_vnum_u8_2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u8_2, svuint8x2_t, uint8_t,
+ svst2q_vnum_u8 (p0, x0, 2, z0),
+ svst2q_vnum (p0, x0, 2, z0))
+
+/*
+** st2q_vnum_u8_14:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #14, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u8_14, svuint8x2_t, uint8_t,
+ svst2q_vnum_u8 (p0, x0, 14, z0),
+ svst2q_vnum (p0, x0, 14, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_u8_16:
+** incb x0, all, mul #16
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u8_16, svuint8x2_t, uint8_t,
+ svst2q_vnum_u8 (p0, x0, 16, z0),
+ svst2q_vnum (p0, x0, 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st2q_vnum_u8_m1:
+** decb x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u8_m1, svuint8x2_t, uint8_t,
+ svst2q_vnum_u8 (p0, x0, -1, z0),
+ svst2q_vnum (p0, x0, -1, z0))
+
+/*
+** st2q_vnum_u8_m2:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-2, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u8_m2, svuint8x2_t, uint8_t,
+ svst2q_vnum_u8 (p0, x0, -2, z0),
+ svst2q_vnum (p0, x0, -2, z0))
+
+/*
+** st2q_vnum_u8_m16:
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, #-16, mul vl\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u8_m16, svuint8x2_t, uint8_t,
+ svst2q_vnum_u8 (p0, x0, -16, z0),
+ svst2q_vnum (p0, x0, -16, z0))
+
+/*
+** st2q_vnum_u8_m18:
+** addvl (x[0-9]+), x0, #-18
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st2q_vnum_u8_m18, svuint8x2_t, uint8_t,
+ svst2q_vnum_u8 (p0, x0, -18, z0),
+ svst2q_vnum (p0, x0, -18, z0))
+
+/*
+** st2q_vnum_u8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st2q {z0\.q(?: - |, )z1\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st2q_vnum_u8_x1, svuint8x2_t, uint8_t,
+ svst2q_vnum_u8 (p0, x0, x1, z0),
+ svst2q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_bf16.c
new file mode 100644
index 0000000..11a8514
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_bf16.c
@@ -0,0 +1,281 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st3q_bf16_base:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_bf16_base, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0, z0),
+ svst3q (p0, x0, z0))
+
+/*
+** st3q_bf16_index:
+** add (x[0-9]+), x0, x1, lsl #?1
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_bf16_index, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 + x1, z0),
+ svst3q (p0, x0 + x1, z0))
+
+/*
+** st3q_bf16_index2:
+** add (x[0-9]+), x0, x1, lsl #?2
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_bf16_index2, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 + x1 * 2, z0),
+ svst3q (p0, x0 + x1 * 2, z0))
+
+/*
+** st3q_bf16_index4:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_bf16_index4, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 + x1 * 4, z0),
+ svst3q (p0, x0 + x1 * 4, z0))
+
+/*
+** st3q_bf16_index8:
+** st3q {z0\.q - z2\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st3q_bf16_index8, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 + x1 * 8, z0),
+ svst3q (p0, x0 + x1 * 8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_bf16_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_bf16_1, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 + svcnth (), z0),
+ svst3q (p0, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_bf16_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_bf16_2, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 + svcnth () * 2, z0),
+ svst3q (p0, x0 + svcnth () * 2, z0))
+
+/*
+** st3q_bf16_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_bf16_3, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 + svcnth () * 3, z0),
+ svst3q (p0, x0 + svcnth () * 3, z0))
+
+/*
+** st3q_bf16_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_bf16_21, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 + svcnth () * 21, z0),
+ svst3q (p0, x0 + svcnth () * 21, z0))
+
+/*
+** st3q_bf16_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_bf16_24, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 + svcnth () * 24, z0),
+ svst3q (p0, x0 + svcnth () * 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_bf16_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_bf16_m1, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 - svcnth (), z0),
+ svst3q (p0, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_bf16_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_bf16_m2, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 - svcnth () * 2, z0),
+ svst3q (p0, x0 - svcnth () * 2, z0))
+
+/*
+** st3q_bf16_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_bf16_m3, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 - svcnth () * 3, z0),
+ svst3q (p0, x0 - svcnth () * 3, z0))
+
+/*
+** st3q_bf16_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_bf16_m24, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 - svcnth () * 24, z0),
+ svst3q (p0, x0 - svcnth () * 24, z0))
+
+/*
+** st3q_bf16_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_bf16_m27, svbfloat16x3_t, bfloat16_t,
+ svst3q_bf16 (p0, x0 - svcnth () * 27, z0),
+ svst3q (p0, x0 - svcnth () * 27, z0))
+
+/*
+** st3q_vnum_bf16_0:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_bf16_0, svbfloat16x3_t, bfloat16_t,
+ svst3q_vnum_bf16 (p0, x0, 0, z0),
+ svst3q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_bf16_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_bf16_1, svbfloat16x3_t, bfloat16_t,
+ svst3q_vnum_bf16 (p0, x0, 1, z0),
+ svst3q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_bf16_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_bf16_2, svbfloat16x3_t, bfloat16_t,
+ svst3q_vnum_bf16 (p0, x0, 2, z0),
+ svst3q_vnum (p0, x0, 2, z0))
+
+/*
+** st3q_vnum_bf16_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_bf16_3, svbfloat16x3_t, bfloat16_t,
+ svst3q_vnum_bf16 (p0, x0, 3, z0),
+ svst3q_vnum (p0, x0, 3, z0))
+
+/*
+** st3q_vnum_bf16_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_bf16_21, svbfloat16x3_t, bfloat16_t,
+ svst3q_vnum_bf16 (p0, x0, 21, z0),
+ svst3q_vnum (p0, x0, 21, z0))
+
+/*
+** st3q_vnum_bf16_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_bf16_24, svbfloat16x3_t, bfloat16_t,
+ svst3q_vnum_bf16 (p0, x0, 24, z0),
+ svst3q_vnum (p0, x0, 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_bf16_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_bf16_m1, svbfloat16x3_t, bfloat16_t,
+ svst3q_vnum_bf16 (p0, x0, -1, z0),
+ svst3q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_bf16_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_bf16_m2, svbfloat16x3_t, bfloat16_t,
+ svst3q_vnum_bf16 (p0, x0, -2, z0),
+ svst3q_vnum (p0, x0, -2, z0))
+
+/*
+** st3q_vnum_bf16_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_bf16_m3, svbfloat16x3_t, bfloat16_t,
+ svst3q_vnum_bf16 (p0, x0, -3, z0),
+ svst3q_vnum (p0, x0, -3, z0))
+
+/*
+** st3q_vnum_bf16_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_bf16_m24, svbfloat16x3_t, bfloat16_t,
+ svst3q_vnum_bf16 (p0, x0, -24, z0),
+ svst3q_vnum (p0, x0, -24, z0))
+
+/*
+** st3q_vnum_bf16_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_bf16_m27, svbfloat16x3_t, bfloat16_t,
+ svst3q_vnum_bf16 (p0, x0, -27, z0),
+ svst3q_vnum (p0, x0, -27, z0))
+
+/*
+** st3q_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st3q {z0\.q - z2\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st3q {z0\.q - z2\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st3q_vnum_bf16_x1, svbfloat16x3_t, bfloat16_t,
+ svst3q_vnum_bf16 (p0, x0, x1, z0),
+ svst3q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_f16.c
new file mode 100644
index 0000000..b018486
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_f16.c
@@ -0,0 +1,281 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st3q_f16_base:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f16_base, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0, z0),
+ svst3q (p0, x0, z0))
+
+/*
+** st3q_f16_index:
+** add (x[0-9]+), x0, x1, lsl #?1
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_f16_index, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 + x1, z0),
+ svst3q (p0, x0 + x1, z0))
+
+/*
+** st3q_f16_index2:
+** add (x[0-9]+), x0, x1, lsl #?2
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_f16_index2, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 + x1 * 2, z0),
+ svst3q (p0, x0 + x1 * 2, z0))
+
+/*
+** st3q_f16_index4:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_f16_index4, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 + x1 * 4, z0),
+ svst3q (p0, x0 + x1 * 4, z0))
+
+/*
+** st3q_f16_index8:
+** st3q {z0\.q - z2\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st3q_f16_index8, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 + x1 * 8, z0),
+ svst3q (p0, x0 + x1 * 8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_f16_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f16_1, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 + svcnth (), z0),
+ svst3q (p0, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_f16_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f16_2, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 + svcnth () * 2, z0),
+ svst3q (p0, x0 + svcnth () * 2, z0))
+
+/*
+** st3q_f16_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_f16_3, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 + svcnth () * 3, z0),
+ svst3q (p0, x0 + svcnth () * 3, z0))
+
+/*
+** st3q_f16_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_f16_21, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 + svcnth () * 21, z0),
+ svst3q (p0, x0 + svcnth () * 21, z0))
+
+/*
+** st3q_f16_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_f16_24, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 + svcnth () * 24, z0),
+ svst3q (p0, x0 + svcnth () * 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_f16_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f16_m1, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 - svcnth (), z0),
+ svst3q (p0, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_f16_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f16_m2, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 - svcnth () * 2, z0),
+ svst3q (p0, x0 - svcnth () * 2, z0))
+
+/*
+** st3q_f16_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_f16_m3, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 - svcnth () * 3, z0),
+ svst3q (p0, x0 - svcnth () * 3, z0))
+
+/*
+** st3q_f16_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_f16_m24, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 - svcnth () * 24, z0),
+ svst3q (p0, x0 - svcnth () * 24, z0))
+
+/*
+** st3q_f16_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_f16_m27, svfloat16x3_t, float16_t,
+ svst3q_f16 (p0, x0 - svcnth () * 27, z0),
+ svst3q (p0, x0 - svcnth () * 27, z0))
+
+/*
+** st3q_vnum_f16_0:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f16_0, svfloat16x3_t, float16_t,
+ svst3q_vnum_f16 (p0, x0, 0, z0),
+ svst3q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_f16_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f16_1, svfloat16x3_t, float16_t,
+ svst3q_vnum_f16 (p0, x0, 1, z0),
+ svst3q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_f16_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f16_2, svfloat16x3_t, float16_t,
+ svst3q_vnum_f16 (p0, x0, 2, z0),
+ svst3q_vnum (p0, x0, 2, z0))
+
+/*
+** st3q_vnum_f16_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f16_3, svfloat16x3_t, float16_t,
+ svst3q_vnum_f16 (p0, x0, 3, z0),
+ svst3q_vnum (p0, x0, 3, z0))
+
+/*
+** st3q_vnum_f16_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f16_21, svfloat16x3_t, float16_t,
+ svst3q_vnum_f16 (p0, x0, 21, z0),
+ svst3q_vnum (p0, x0, 21, z0))
+
+/*
+** st3q_vnum_f16_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f16_24, svfloat16x3_t, float16_t,
+ svst3q_vnum_f16 (p0, x0, 24, z0),
+ svst3q_vnum (p0, x0, 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_f16_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f16_m1, svfloat16x3_t, float16_t,
+ svst3q_vnum_f16 (p0, x0, -1, z0),
+ svst3q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_f16_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f16_m2, svfloat16x3_t, float16_t,
+ svst3q_vnum_f16 (p0, x0, -2, z0),
+ svst3q_vnum (p0, x0, -2, z0))
+
+/*
+** st3q_vnum_f16_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f16_m3, svfloat16x3_t, float16_t,
+ svst3q_vnum_f16 (p0, x0, -3, z0),
+ svst3q_vnum (p0, x0, -3, z0))
+
+/*
+** st3q_vnum_f16_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f16_m24, svfloat16x3_t, float16_t,
+ svst3q_vnum_f16 (p0, x0, -24, z0),
+ svst3q_vnum (p0, x0, -24, z0))
+
+/*
+** st3q_vnum_f16_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f16_m27, svfloat16x3_t, float16_t,
+ svst3q_vnum_f16 (p0, x0, -27, z0),
+ svst3q_vnum (p0, x0, -27, z0))
+
+/*
+** st3q_vnum_f16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st3q {z0\.q - z2\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st3q {z0\.q - z2\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st3q_vnum_f16_x1, svfloat16x3_t, float16_t,
+ svst3q_vnum_f16 (p0, x0, x1, z0),
+ svst3q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_f32.c
new file mode 100644
index 0000000..b9de28f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_f32.c
@@ -0,0 +1,271 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st3q_f32_base:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f32_base, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0, z0),
+ svst3q (p0, x0, z0))
+
+/*
+** st3q_f32_index:
+** add (x[0-9]+), x0, x1, lsl #?2
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_f32_index, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0 + x1, z0),
+ svst3q (p0, x0 + x1, z0))
+
+/*
+** st3q_f32_index2:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_f32_index2, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0 + x1 * 2, z0),
+ svst3q (p0, x0 + x1 * 2, z0))
+
+/*
+** st3q_f32_index4:
+** st3q {z0\.q - z2\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st3q_f32_index4, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0 + x1 * 4, z0),
+ svst3q (p0, x0 + x1 * 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_f32_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f32_1, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0 + svcntw (), z0),
+ svst3q (p0, x0 + svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_f32_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f32_2, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0 + svcntw () * 2, z0),
+ svst3q (p0, x0 + svcntw () * 2, z0))
+
+/*
+** st3q_f32_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_f32_3, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0 + svcntw () * 3, z0),
+ svst3q (p0, x0 + svcntw () * 3, z0))
+
+/*
+** st3q_f32_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_f32_21, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0 + svcntw () * 21, z0),
+ svst3q (p0, x0 + svcntw () * 21, z0))
+
+/*
+** st3q_f32_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_f32_24, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0 + svcntw () * 24, z0),
+ svst3q (p0, x0 + svcntw () * 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_f32_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f32_m1, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0 - svcntw (), z0),
+ svst3q (p0, x0 - svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_f32_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f32_m2, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0 - svcntw () * 2, z0),
+ svst3q (p0, x0 - svcntw () * 2, z0))
+
+/*
+** st3q_f32_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_f32_m3, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0 - svcntw () * 3, z0),
+ svst3q (p0, x0 - svcntw () * 3, z0))
+
+/*
+** st3q_f32_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_f32_m24, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0 - svcntw () * 24, z0),
+ svst3q (p0, x0 - svcntw () * 24, z0))
+
+/*
+** st3q_f32_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_f32_m27, svfloat32x3_t, float32_t,
+ svst3q_f32 (p0, x0 - svcntw () * 27, z0),
+ svst3q (p0, x0 - svcntw () * 27, z0))
+
+/*
+** st3q_vnum_f32_0:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f32_0, svfloat32x3_t, float32_t,
+ svst3q_vnum_f32 (p0, x0, 0, z0),
+ svst3q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_f32_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f32_1, svfloat32x3_t, float32_t,
+ svst3q_vnum_f32 (p0, x0, 1, z0),
+ svst3q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_f32_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f32_2, svfloat32x3_t, float32_t,
+ svst3q_vnum_f32 (p0, x0, 2, z0),
+ svst3q_vnum (p0, x0, 2, z0))
+
+/*
+** st3q_vnum_f32_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f32_3, svfloat32x3_t, float32_t,
+ svst3q_vnum_f32 (p0, x0, 3, z0),
+ svst3q_vnum (p0, x0, 3, z0))
+
+/*
+** st3q_vnum_f32_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f32_21, svfloat32x3_t, float32_t,
+ svst3q_vnum_f32 (p0, x0, 21, z0),
+ svst3q_vnum (p0, x0, 21, z0))
+
+/*
+** st3q_vnum_f32_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f32_24, svfloat32x3_t, float32_t,
+ svst3q_vnum_f32 (p0, x0, 24, z0),
+ svst3q_vnum (p0, x0, 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_f32_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f32_m1, svfloat32x3_t, float32_t,
+ svst3q_vnum_f32 (p0, x0, -1, z0),
+ svst3q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_f32_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f32_m2, svfloat32x3_t, float32_t,
+ svst3q_vnum_f32 (p0, x0, -2, z0),
+ svst3q_vnum (p0, x0, -2, z0))
+
+/*
+** st3q_vnum_f32_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f32_m3, svfloat32x3_t, float32_t,
+ svst3q_vnum_f32 (p0, x0, -3, z0),
+ svst3q_vnum (p0, x0, -3, z0))
+
+/*
+** st3q_vnum_f32_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f32_m24, svfloat32x3_t, float32_t,
+ svst3q_vnum_f32 (p0, x0, -24, z0),
+ svst3q_vnum (p0, x0, -24, z0))
+
+/*
+** st3q_vnum_f32_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f32_m27, svfloat32x3_t, float32_t,
+ svst3q_vnum_f32 (p0, x0, -27, z0),
+ svst3q_vnum (p0, x0, -27, z0))
+
+/*
+** st3q_vnum_f32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st3q {z0\.q - z2\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st3q {z0\.q - z2\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st3q_vnum_f32_x1, svfloat32x3_t, float32_t,
+ svst3q_vnum_f32 (p0, x0, x1, z0),
+ svst3q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_f64.c
new file mode 100644
index 0000000..2824546
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_f64.c
@@ -0,0 +1,261 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st3q_f64_base:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f64_base, svfloat64x3_t, float64_t,
+ svst3q_f64 (p0, x0, z0),
+ svst3q (p0, x0, z0))
+
+/*
+** st3q_f64_index:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_f64_index, svfloat64x3_t, float64_t,
+ svst3q_f64 (p0, x0 + x1, z0),
+ svst3q (p0, x0 + x1, z0))
+
+/*
+** st3q_f64_index2:
+** st3q {z0\.q - z2\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st3q_f64_index2, svfloat64x3_t, float64_t,
+ svst3q_f64 (p0, x0 + x1 * 2, z0),
+ svst3q (p0, x0 + x1 * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_f64_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f64_1, svfloat64x3_t, float64_t,
+ svst3q_f64 (p0, x0 + svcntd (), z0),
+ svst3q (p0, x0 + svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_f64_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f64_2, svfloat64x3_t, float64_t,
+ svst3q_f64 (p0, x0 + svcntd () * 2, z0),
+ svst3q (p0, x0 + svcntd () * 2, z0))
+
+/*
+** st3q_f64_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_f64_3, svfloat64x3_t, float64_t,
+ svst3q_f64 (p0, x0 + svcntd () * 3, z0),
+ svst3q (p0, x0 + svcntd () * 3, z0))
+
+/*
+** st3q_f64_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_f64_21, svfloat64x3_t, float64_t,
+ svst3q_f64 (p0, x0 + svcntd () * 21, z0),
+ svst3q (p0, x0 + svcntd () * 21, z0))
+
+/*
+** st3q_f64_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_f64_24, svfloat64x3_t, float64_t,
+ svst3q_f64 (p0, x0 + svcntd () * 24, z0),
+ svst3q (p0, x0 + svcntd () * 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_f64_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f64_m1, svfloat64x3_t, float64_t,
+ svst3q_f64 (p0, x0 - svcntd (), z0),
+ svst3q (p0, x0 - svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_f64_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_f64_m2, svfloat64x3_t, float64_t,
+ svst3q_f64 (p0, x0 - svcntd () * 2, z0),
+ svst3q (p0, x0 - svcntd () * 2, z0))
+
+/*
+** st3q_f64_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_f64_m3, svfloat64x3_t, float64_t,
+ svst3q_f64 (p0, x0 - svcntd () * 3, z0),
+ svst3q (p0, x0 - svcntd () * 3, z0))
+
+/*
+** st3q_f64_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_f64_m24, svfloat64x3_t, float64_t,
+ svst3q_f64 (p0, x0 - svcntd () * 24, z0),
+ svst3q (p0, x0 - svcntd () * 24, z0))
+
+/*
+** st3q_f64_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_f64_m27, svfloat64x3_t, float64_t,
+ svst3q_f64 (p0, x0 - svcntd () * 27, z0),
+ svst3q (p0, x0 - svcntd () * 27, z0))
+
+/*
+** st3q_vnum_f64_0:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f64_0, svfloat64x3_t, float64_t,
+ svst3q_vnum_f64 (p0, x0, 0, z0),
+ svst3q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_f64_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f64_1, svfloat64x3_t, float64_t,
+ svst3q_vnum_f64 (p0, x0, 1, z0),
+ svst3q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_f64_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f64_2, svfloat64x3_t, float64_t,
+ svst3q_vnum_f64 (p0, x0, 2, z0),
+ svst3q_vnum (p0, x0, 2, z0))
+
+/*
+** st3q_vnum_f64_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f64_3, svfloat64x3_t, float64_t,
+ svst3q_vnum_f64 (p0, x0, 3, z0),
+ svst3q_vnum (p0, x0, 3, z0))
+
+/*
+** st3q_vnum_f64_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f64_21, svfloat64x3_t, float64_t,
+ svst3q_vnum_f64 (p0, x0, 21, z0),
+ svst3q_vnum (p0, x0, 21, z0))
+
+/*
+** st3q_vnum_f64_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f64_24, svfloat64x3_t, float64_t,
+ svst3q_vnum_f64 (p0, x0, 24, z0),
+ svst3q_vnum (p0, x0, 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_f64_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f64_m1, svfloat64x3_t, float64_t,
+ svst3q_vnum_f64 (p0, x0, -1, z0),
+ svst3q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_f64_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f64_m2, svfloat64x3_t, float64_t,
+ svst3q_vnum_f64 (p0, x0, -2, z0),
+ svst3q_vnum (p0, x0, -2, z0))
+
+/*
+** st3q_vnum_f64_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f64_m3, svfloat64x3_t, float64_t,
+ svst3q_vnum_f64 (p0, x0, -3, z0),
+ svst3q_vnum (p0, x0, -3, z0))
+
+/*
+** st3q_vnum_f64_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f64_m24, svfloat64x3_t, float64_t,
+ svst3q_vnum_f64 (p0, x0, -24, z0),
+ svst3q_vnum (p0, x0, -24, z0))
+
+/*
+** st3q_vnum_f64_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_f64_m27, svfloat64x3_t, float64_t,
+ svst3q_vnum_f64 (p0, x0, -27, z0),
+ svst3q_vnum (p0, x0, -27, z0))
+
+/*
+** st3q_vnum_f64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st3q {z0\.q - z2\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st3q {z0\.q - z2\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st3q_vnum_f64_x1, svfloat64x3_t, float64_t,
+ svst3q_vnum_f64 (p0, x0, x1, z0),
+ svst3q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s16.c
new file mode 100644
index 0000000..afe99c3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s16.c
@@ -0,0 +1,281 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st3q_s16_base:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s16_base, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0, z0),
+ svst3q (p0, x0, z0))
+
+/*
+** st3q_s16_index:
+** add (x[0-9]+), x0, x1, lsl #?1
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s16_index, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 + x1, z0),
+ svst3q (p0, x0 + x1, z0))
+
+/*
+** st3q_s16_index2:
+** add (x[0-9]+), x0, x1, lsl #?2
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s16_index2, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 + x1 * 2, z0),
+ svst3q (p0, x0 + x1 * 2, z0))
+
+/*
+** st3q_s16_index4:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s16_index4, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 + x1 * 4, z0),
+ svst3q (p0, x0 + x1 * 4, z0))
+
+/*
+** st3q_s16_index8:
+** st3q {z0\.q - z2\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st3q_s16_index8, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 + x1 * 8, z0),
+ svst3q (p0, x0 + x1 * 8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s16_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s16_1, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 + svcnth (), z0),
+ svst3q (p0, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s16_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s16_2, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 + svcnth () * 2, z0),
+ svst3q (p0, x0 + svcnth () * 2, z0))
+
+/*
+** st3q_s16_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s16_3, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 + svcnth () * 3, z0),
+ svst3q (p0, x0 + svcnth () * 3, z0))
+
+/*
+** st3q_s16_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s16_21, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 + svcnth () * 21, z0),
+ svst3q (p0, x0 + svcnth () * 21, z0))
+
+/*
+** st3q_s16_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s16_24, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 + svcnth () * 24, z0),
+ svst3q (p0, x0 + svcnth () * 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s16_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s16_m1, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 - svcnth (), z0),
+ svst3q (p0, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s16_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s16_m2, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 - svcnth () * 2, z0),
+ svst3q (p0, x0 - svcnth () * 2, z0))
+
+/*
+** st3q_s16_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s16_m3, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 - svcnth () * 3, z0),
+ svst3q (p0, x0 - svcnth () * 3, z0))
+
+/*
+** st3q_s16_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s16_m24, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 - svcnth () * 24, z0),
+ svst3q (p0, x0 - svcnth () * 24, z0))
+
+/*
+** st3q_s16_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s16_m27, svint16x3_t, int16_t,
+ svst3q_s16 (p0, x0 - svcnth () * 27, z0),
+ svst3q (p0, x0 - svcnth () * 27, z0))
+
+/*
+** st3q_vnum_s16_0:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s16_0, svint16x3_t, int16_t,
+ svst3q_vnum_s16 (p0, x0, 0, z0),
+ svst3q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s16_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s16_1, svint16x3_t, int16_t,
+ svst3q_vnum_s16 (p0, x0, 1, z0),
+ svst3q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s16_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s16_2, svint16x3_t, int16_t,
+ svst3q_vnum_s16 (p0, x0, 2, z0),
+ svst3q_vnum (p0, x0, 2, z0))
+
+/*
+** st3q_vnum_s16_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s16_3, svint16x3_t, int16_t,
+ svst3q_vnum_s16 (p0, x0, 3, z0),
+ svst3q_vnum (p0, x0, 3, z0))
+
+/*
+** st3q_vnum_s16_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s16_21, svint16x3_t, int16_t,
+ svst3q_vnum_s16 (p0, x0, 21, z0),
+ svst3q_vnum (p0, x0, 21, z0))
+
+/*
+** st3q_vnum_s16_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s16_24, svint16x3_t, int16_t,
+ svst3q_vnum_s16 (p0, x0, 24, z0),
+ svst3q_vnum (p0, x0, 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s16_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s16_m1, svint16x3_t, int16_t,
+ svst3q_vnum_s16 (p0, x0, -1, z0),
+ svst3q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s16_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s16_m2, svint16x3_t, int16_t,
+ svst3q_vnum_s16 (p0, x0, -2, z0),
+ svst3q_vnum (p0, x0, -2, z0))
+
+/*
+** st3q_vnum_s16_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s16_m3, svint16x3_t, int16_t,
+ svst3q_vnum_s16 (p0, x0, -3, z0),
+ svst3q_vnum (p0, x0, -3, z0))
+
+/*
+** st3q_vnum_s16_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s16_m24, svint16x3_t, int16_t,
+ svst3q_vnum_s16 (p0, x0, -24, z0),
+ svst3q_vnum (p0, x0, -24, z0))
+
+/*
+** st3q_vnum_s16_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s16_m27, svint16x3_t, int16_t,
+ svst3q_vnum_s16 (p0, x0, -27, z0),
+ svst3q_vnum (p0, x0, -27, z0))
+
+/*
+** st3q_vnum_s16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st3q {z0\.q - z2\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st3q {z0\.q - z2\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st3q_vnum_s16_x1, svint16x3_t, int16_t,
+ svst3q_vnum_s16 (p0, x0, x1, z0),
+ svst3q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s32.c
new file mode 100644
index 0000000..7547dad
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s32.c
@@ -0,0 +1,271 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st3q_s32_base:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s32_base, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0, z0),
+ svst3q (p0, x0, z0))
+
+/*
+** st3q_s32_index:
+** add (x[0-9]), x0, x1, lsl #?2
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s32_index, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0 + x1, z0),
+ svst3q (p0, x0 + x1, z0))
+
+/*
+** st3q_s32_index2:
+** add (x[0-9]), x0, x1, lsl #?3
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s32_index2, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0 + x1 * 2, z0),
+ svst3q (p0, x0 + x1 * 2, z0))
+
+/*
+** st3q_s32_index4:
+** st3q {z0\.q - z2\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st3q_s32_index4, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0 + x1 * 4, z0),
+ svst3q (p0, x0 + x1 * 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s32_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s32_1, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0 + svcntw (), z0),
+ svst3q (p0, x0 + svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s32_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s32_2, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0 + svcntw () * 2, z0),
+ svst3q (p0, x0 + svcntw () * 2, z0))
+
+/*
+** st3q_s32_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s32_3, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0 + svcntw () * 3, z0),
+ svst3q (p0, x0 + svcntw () * 3, z0))
+
+/*
+** st3q_s32_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s32_21, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0 + svcntw () * 21, z0),
+ svst3q (p0, x0 + svcntw () * 21, z0))
+
+/*
+** st3q_s32_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s32_24, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0 + svcntw () * 24, z0),
+ svst3q (p0, x0 + svcntw () * 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s32_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s32_m1, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0 - svcntw (), z0),
+ svst3q (p0, x0 - svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s32_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s32_m2, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0 - svcntw () * 2, z0),
+ svst3q (p0, x0 - svcntw () * 2, z0))
+
+/*
+** st3q_s32_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s32_m3, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0 - svcntw () * 3, z0),
+ svst3q (p0, x0 - svcntw () * 3, z0))
+
+/*
+** st3q_s32_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s32_m24, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0 - svcntw () * 24, z0),
+ svst3q (p0, x0 - svcntw () * 24, z0))
+
+/*
+** st3q_s32_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s32_m27, svint32x3_t, int32_t,
+ svst3q_s32 (p0, x0 - svcntw () * 27, z0),
+ svst3q (p0, x0 - svcntw () * 27, z0))
+
+/*
+** st3q_vnum_s32_0:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s32_0, svint32x3_t, int32_t,
+ svst3q_vnum_s32 (p0, x0, 0, z0),
+ svst3q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s32_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s32_1, svint32x3_t, int32_t,
+ svst3q_vnum_s32 (p0, x0, 1, z0),
+ svst3q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s32_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s32_2, svint32x3_t, int32_t,
+ svst3q_vnum_s32 (p0, x0, 2, z0),
+ svst3q_vnum (p0, x0, 2, z0))
+
+/*
+** st3q_vnum_s32_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s32_3, svint32x3_t, int32_t,
+ svst3q_vnum_s32 (p0, x0, 3, z0),
+ svst3q_vnum (p0, x0, 3, z0))
+
+/*
+** st3q_vnum_s32_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s32_21, svint32x3_t, int32_t,
+ svst3q_vnum_s32 (p0, x0, 21, z0),
+ svst3q_vnum (p0, x0, 21, z0))
+
+/*
+** st3q_vnum_s32_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s32_24, svint32x3_t, int32_t,
+ svst3q_vnum_s32 (p0, x0, 24, z0),
+ svst3q_vnum (p0, x0, 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s32_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s32_m1, svint32x3_t, int32_t,
+ svst3q_vnum_s32 (p0, x0, -1, z0),
+ svst3q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s32_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s32_m2, svint32x3_t, int32_t,
+ svst3q_vnum_s32 (p0, x0, -2, z0),
+ svst3q_vnum (p0, x0, -2, z0))
+
+/*
+** st3q_vnum_s32_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s32_m3, svint32x3_t, int32_t,
+ svst3q_vnum_s32 (p0, x0, -3, z0),
+ svst3q_vnum (p0, x0, -3, z0))
+
+/*
+** st3q_vnum_s32_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s32_m24, svint32x3_t, int32_t,
+ svst3q_vnum_s32 (p0, x0, -24, z0),
+ svst3q_vnum (p0, x0, -24, z0))
+
+/*
+** st3q_vnum_s32_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s32_m27, svint32x3_t, int32_t,
+ svst3q_vnum_s32 (p0, x0, -27, z0),
+ svst3q_vnum (p0, x0, -27, z0))
+
+/*
+** st3q_vnum_s32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st3q {z0\.q - z2\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st3q {z0\.q - z2\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st3q_vnum_s32_x1, svint32x3_t, int32_t,
+ svst3q_vnum_s32 (p0, x0, x1, z0),
+ svst3q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s64.c
new file mode 100644
index 0000000..9e2f485
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s64.c
@@ -0,0 +1,261 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st3q_s64_base:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s64_base, svint64x3_t, int64_t,
+ svst3q_s64 (p0, x0, z0),
+ svst3q (p0, x0, z0))
+
+/*
+** st3q_s64_index:
+** add (x[0-9]), x0, x1, lsl #?3
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s64_index, svint64x3_t, int64_t,
+ svst3q_s64 (p0, x0 + x1, z0),
+ svst3q (p0, x0 + x1, z0))
+
+/*
+** st3q_s64_index2:
+** st3q {z0\.q - z2\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st3q_s64_index2, svint64x3_t, int64_t,
+ svst3q_s64 (p0, x0 + x1 * 2, z0),
+ svst3q (p0, x0 + x1 * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s64_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s64_1, svint64x3_t, int64_t,
+ svst3q_s64 (p0, x0 + svcntd (), z0),
+ svst3q (p0, x0 + svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s64_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s64_2, svint64x3_t, int64_t,
+ svst3q_s64 (p0, x0 + svcntd () * 2, z0),
+ svst3q (p0, x0 + svcntd () * 2, z0))
+
+/*
+** st3q_s64_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s64_3, svint64x3_t, int64_t,
+ svst3q_s64 (p0, x0 + svcntd () * 3, z0),
+ svst3q (p0, x0 + svcntd () * 3, z0))
+
+/*
+** st3q_s64_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s64_21, svint64x3_t, int64_t,
+ svst3q_s64 (p0, x0 + svcntd () * 21, z0),
+ svst3q (p0, x0 + svcntd () * 21, z0))
+
+/*
+** st3q_s64_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s64_24, svint64x3_t, int64_t,
+ svst3q_s64 (p0, x0 + svcntd () * 24, z0),
+ svst3q (p0, x0 + svcntd () * 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s64_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s64_m1, svint64x3_t, int64_t,
+ svst3q_s64 (p0, x0 - svcntd (), z0),
+ svst3q (p0, x0 - svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s64_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s64_m2, svint64x3_t, int64_t,
+ svst3q_s64 (p0, x0 - svcntd () * 2, z0),
+ svst3q (p0, x0 - svcntd () * 2, z0))
+
+/*
+** st3q_s64_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s64_m3, svint64x3_t, int64_t,
+ svst3q_s64 (p0, x0 - svcntd () * 3, z0),
+ svst3q (p0, x0 - svcntd () * 3, z0))
+
+/*
+** st3q_s64_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s64_m24, svint64x3_t, int64_t,
+ svst3q_s64 (p0, x0 - svcntd () * 24, z0),
+ svst3q (p0, x0 - svcntd () * 24, z0))
+
+/*
+** st3q_s64_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s64_m27, svint64x3_t, int64_t,
+ svst3q_s64 (p0, x0 - svcntd () * 27, z0),
+ svst3q (p0, x0 - svcntd () * 27, z0))
+
+/*
+** st3q_vnum_s64_0:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s64_0, svint64x3_t, int64_t,
+ svst3q_vnum_s64 (p0, x0, 0, z0),
+ svst3q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s64_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s64_1, svint64x3_t, int64_t,
+ svst3q_vnum_s64 (p0, x0, 1, z0),
+ svst3q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s64_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s64_2, svint64x3_t, int64_t,
+ svst3q_vnum_s64 (p0, x0, 2, z0),
+ svst3q_vnum (p0, x0, 2, z0))
+
+/*
+** st3q_vnum_s64_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s64_3, svint64x3_t, int64_t,
+ svst3q_vnum_s64 (p0, x0, 3, z0),
+ svst3q_vnum (p0, x0, 3, z0))
+
+/*
+** st3q_vnum_s64_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s64_21, svint64x3_t, int64_t,
+ svst3q_vnum_s64 (p0, x0, 21, z0),
+ svst3q_vnum (p0, x0, 21, z0))
+
+/*
+** st3q_vnum_s64_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s64_24, svint64x3_t, int64_t,
+ svst3q_vnum_s64 (p0, x0, 24, z0),
+ svst3q_vnum (p0, x0, 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s64_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s64_m1, svint64x3_t, int64_t,
+ svst3q_vnum_s64 (p0, x0, -1, z0),
+ svst3q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s64_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s64_m2, svint64x3_t, int64_t,
+ svst3q_vnum_s64 (p0, x0, -2, z0),
+ svst3q_vnum (p0, x0, -2, z0))
+
+/*
+** st3q_vnum_s64_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s64_m3, svint64x3_t, int64_t,
+ svst3q_vnum_s64 (p0, x0, -3, z0),
+ svst3q_vnum (p0, x0, -3, z0))
+
+/*
+** st3q_vnum_s64_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s64_m24, svint64x3_t, int64_t,
+ svst3q_vnum_s64 (p0, x0, -24, z0),
+ svst3q_vnum (p0, x0, -24, z0))
+
+/*
+** st3q_vnum_s64_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s64_m27, svint64x3_t, int64_t,
+ svst3q_vnum_s64 (p0, x0, -27, z0),
+ svst3q_vnum (p0, x0, -27, z0))
+
+/*
+** st3q_vnum_s64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st3q {z0\.q - z2\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st3q {z0\.q - z2\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st3q_vnum_s64_x1, svint64x3_t, int64_t,
+ svst3q_vnum_s64 (p0, x0, x1, z0),
+ svst3q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s8.c
new file mode 100644
index 0000000..07e5f8f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_s8.c
@@ -0,0 +1,291 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st3q_s8_base:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s8_base, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0, z0),
+ svst3q (p0, x0, z0))
+
+/*
+** st3q_s8_index:
+** add (x[0-9]), (?:x0, x1|x1, x0)
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s8_index, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 + x1, z0),
+ svst3q (p0, x0 + x1, z0))
+
+/*
+** st3q_s8_index2:
+** add (x[0-9]), x0, x1, lsl #?1
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s8_index2, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 + x1 * 2, z0),
+ svst3q (p0, x0 + x1 * 2, z0))
+
+/*
+** st3q_s8_index4:
+** add (x[0-9]), x0, x1, lsl #?2
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s8_index4, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 + x1 * 4, z0),
+ svst3q (p0, x0 + x1 * 4, z0))
+
+/*
+** st3q_s8_index8:
+** add (x[0-9]), x0, x1, lsl #?3
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s8_index8, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 + x1 * 8, z0),
+ svst3q (p0, x0 + x1 * 8, z0))
+
+/*
+** st3q_s8_index16:
+** st3q {z0\.q - z2\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st3q_s8_index16, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 + x1 * 16, z0),
+ svst3q (p0, x0 + x1 * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s8_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s8_1, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 + svcntb (), z0),
+ svst3q (p0, x0 + svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s8_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s8_2, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 + svcntb () * 2, z0),
+ svst3q (p0, x0 + svcntb () * 2, z0))
+
+/*
+** st3q_s8_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s8_3, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 + svcntb () * 3, z0),
+ svst3q (p0, x0 + svcntb () * 3, z0))
+
+/*
+** st3q_s8_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s8_21, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 + svcntb () * 21, z0),
+ svst3q (p0, x0 + svcntb () * 21, z0))
+
+/*
+** st3q_s8_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s8_24, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 + svcntb () * 24, z0),
+ svst3q (p0, x0 + svcntb () * 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s8_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s8_m1, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 - svcntb (), z0),
+ svst3q (p0, x0 - svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_s8_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_s8_m2, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 - svcntb () * 2, z0),
+ svst3q (p0, x0 - svcntb () * 2, z0))
+
+/*
+** st3q_s8_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s8_m3, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 - svcntb () * 3, z0),
+ svst3q (p0, x0 - svcntb () * 3, z0))
+
+/*
+** st3q_s8_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_s8_m24, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 - svcntb () * 24, z0),
+ svst3q (p0, x0 - svcntb () * 24, z0))
+
+/*
+** st3q_s8_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_s8_m27, svint8x3_t, int8_t,
+ svst3q_s8 (p0, x0 - svcntb () * 27, z0),
+ svst3q (p0, x0 - svcntb () * 27, z0))
+
+/*
+** st3q_vnum_s8_0:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s8_0, svint8x3_t, int8_t,
+ svst3q_vnum_s8 (p0, x0, 0, z0),
+ svst3q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s8_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s8_1, svint8x3_t, int8_t,
+ svst3q_vnum_s8 (p0, x0, 1, z0),
+ svst3q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s8_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s8_2, svint8x3_t, int8_t,
+ svst3q_vnum_s8 (p0, x0, 2, z0),
+ svst3q_vnum (p0, x0, 2, z0))
+
+/*
+** st3q_vnum_s8_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s8_3, svint8x3_t, int8_t,
+ svst3q_vnum_s8 (p0, x0, 3, z0),
+ svst3q_vnum (p0, x0, 3, z0))
+
+/*
+** st3q_vnum_s8_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s8_21, svint8x3_t, int8_t,
+ svst3q_vnum_s8 (p0, x0, 21, z0),
+ svst3q_vnum (p0, x0, 21, z0))
+
+/*
+** st3q_vnum_s8_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s8_24, svint8x3_t, int8_t,
+ svst3q_vnum_s8 (p0, x0, 24, z0),
+ svst3q_vnum (p0, x0, 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s8_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s8_m1, svint8x3_t, int8_t,
+ svst3q_vnum_s8 (p0, x0, -1, z0),
+ svst3q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_s8_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s8_m2, svint8x3_t, int8_t,
+ svst3q_vnum_s8 (p0, x0, -2, z0),
+ svst3q_vnum (p0, x0, -2, z0))
+
+/*
+** st3q_vnum_s8_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s8_m3, svint8x3_t, int8_t,
+ svst3q_vnum_s8 (p0, x0, -3, z0),
+ svst3q_vnum (p0, x0, -3, z0))
+
+/*
+** st3q_vnum_s8_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s8_m24, svint8x3_t, int8_t,
+ svst3q_vnum_s8 (p0, x0, -24, z0),
+ svst3q_vnum (p0, x0, -24, z0))
+
+/*
+** st3q_vnum_s8_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_s8_m27, svint8x3_t, int8_t,
+ svst3q_vnum_s8 (p0, x0, -27, z0),
+ svst3q_vnum (p0, x0, -27, z0))
+
+/*
+** st3q_vnum_s8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st3q {z0\.q - z2\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st3q {z0\.q - z2\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st3q_vnum_s8_x1, svint8x3_t, int8_t,
+ svst3q_vnum_s8 (p0, x0, x1, z0),
+ svst3q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u16.c
new file mode 100644
index 0000000..8ffa920
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u16.c
@@ -0,0 +1,281 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st3q_u16_base:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u16_base, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0, z0),
+ svst3q (p0, x0, z0))
+
+/*
+** st3q_u16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u16_index, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 + x1, z0),
+ svst3q (p0, x0 + x1, z0))
+
+/*
+** st3q_u16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u16_index2, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 + x1 * 2, z0),
+ svst3q (p0, x0 + x1 * 2, z0))
+
+/*
+** st3q_u16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u16_index4, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 + x1 * 4, z0),
+ svst3q (p0, x0 + x1 * 4, z0))
+
+/*
+** st3q_u16_index8:
+** st3q {z0\.q - z2\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st3q_u16_index8, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 + x1 * 8, z0),
+ svst3q (p0, x0 + x1 * 8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u16_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u16_1, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 + svcnth (), z0),
+ svst3q (p0, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u16_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u16_2, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 + svcnth () * 2, z0),
+ svst3q (p0, x0 + svcnth () * 2, z0))
+
+/*
+** st3q_u16_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u16_3, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 + svcnth () * 3, z0),
+ svst3q (p0, x0 + svcnth () * 3, z0))
+
+/*
+** st3q_u16_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u16_21, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 + svcnth () * 21, z0),
+ svst3q (p0, x0 + svcnth () * 21, z0))
+
+/*
+** st3q_u16_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u16_24, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 + svcnth () * 24, z0),
+ svst3q (p0, x0 + svcnth () * 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u16_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u16_m1, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 - svcnth (), z0),
+ svst3q (p0, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u16_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u16_m2, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 - svcnth () * 2, z0),
+ svst3q (p0, x0 - svcnth () * 2, z0))
+
+/*
+** st3q_u16_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u16_m3, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 - svcnth () * 3, z0),
+ svst3q (p0, x0 - svcnth () * 3, z0))
+
+/*
+** st3q_u16_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u16_m24, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 - svcnth () * 24, z0),
+ svst3q (p0, x0 - svcnth () * 24, z0))
+
+/*
+** st3q_u16_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u16_m27, svuint16x3_t, uint16_t,
+ svst3q_u16 (p0, x0 - svcnth () * 27, z0),
+ svst3q (p0, x0 - svcnth () * 27, z0))
+
+/*
+** st3q_vnum_u16_0:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u16_0, svuint16x3_t, uint16_t,
+ svst3q_vnum_u16 (p0, x0, 0, z0),
+ svst3q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u16_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u16_1, svuint16x3_t, uint16_t,
+ svst3q_vnum_u16 (p0, x0, 1, z0),
+ svst3q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u16_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u16_2, svuint16x3_t, uint16_t,
+ svst3q_vnum_u16 (p0, x0, 2, z0),
+ svst3q_vnum (p0, x0, 2, z0))
+
+/*
+** st3q_vnum_u16_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u16_3, svuint16x3_t, uint16_t,
+ svst3q_vnum_u16 (p0, x0, 3, z0),
+ svst3q_vnum (p0, x0, 3, z0))
+
+/*
+** st3q_vnum_u16_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u16_21, svuint16x3_t, uint16_t,
+ svst3q_vnum_u16 (p0, x0, 21, z0),
+ svst3q_vnum (p0, x0, 21, z0))
+
+/*
+** st3q_vnum_u16_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u16_24, svuint16x3_t, uint16_t,
+ svst3q_vnum_u16 (p0, x0, 24, z0),
+ svst3q_vnum (p0, x0, 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u16_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u16_m1, svuint16x3_t, uint16_t,
+ svst3q_vnum_u16 (p0, x0, -1, z0),
+ svst3q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u16_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u16_m2, svuint16x3_t, uint16_t,
+ svst3q_vnum_u16 (p0, x0, -2, z0),
+ svst3q_vnum (p0, x0, -2, z0))
+
+/*
+** st3q_vnum_u16_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u16_m3, svuint16x3_t, uint16_t,
+ svst3q_vnum_u16 (p0, x0, -3, z0),
+ svst3q_vnum (p0, x0, -3, z0))
+
+/*
+** st3q_vnum_u16_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u16_m24, svuint16x3_t, uint16_t,
+ svst3q_vnum_u16 (p0, x0, -24, z0),
+ svst3q_vnum (p0, x0, -24, z0))
+
+/*
+** st3q_vnum_u16_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u16_m27, svuint16x3_t, uint16_t,
+ svst3q_vnum_u16 (p0, x0, -27, z0),
+ svst3q_vnum (p0, x0, -27, z0))
+
+/*
+** st3q_vnum_u16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st3q {z0\.q - z2\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st3q {z0\.q - z2\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st3q_vnum_u16_x1, svuint16x3_t, uint16_t,
+ svst3q_vnum_u16 (p0, x0, x1, z0),
+ svst3q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u32.c
new file mode 100644
index 0000000..3444f4e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u32.c
@@ -0,0 +1,271 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st3q_u32_base:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u32_base, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0, z0),
+ svst3q (p0, x0, z0))
+
+/*
+** st3q_u32_index:
+** add (x[0-9]), x0, x1, lsl #?2
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u32_index, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0 + x1, z0),
+ svst3q (p0, x0 + x1, z0))
+
+/*
+** st3q_u32_index2:
+** add (x[0-9]), x0, x1, lsl #?3
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u32_index2, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0 + x1 * 2, z0),
+ svst3q (p0, x0 + x1 * 2, z0))
+
+/*
+** st3q_u32_index4:
+** st3q {z0\.q - z2\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st3q_u32_index4, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0 + x1 * 4, z0),
+ svst3q (p0, x0 + x1 * 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u32_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u32_1, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0 + svcntw (), z0),
+ svst3q (p0, x0 + svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u32_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u32_2, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0 + svcntw () * 2, z0),
+ svst3q (p0, x0 + svcntw () * 2, z0))
+
+/*
+** st3q_u32_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u32_3, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0 + svcntw () * 3, z0),
+ svst3q (p0, x0 + svcntw () * 3, z0))
+
+/*
+** st3q_u32_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u32_21, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0 + svcntw () * 21, z0),
+ svst3q (p0, x0 + svcntw () * 21, z0))
+
+/*
+** st3q_u32_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u32_24, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0 + svcntw () * 24, z0),
+ svst3q (p0, x0 + svcntw () * 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u32_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u32_m1, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0 - svcntw (), z0),
+ svst3q (p0, x0 - svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u32_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u32_m2, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0 - svcntw () * 2, z0),
+ svst3q (p0, x0 - svcntw () * 2, z0))
+
+/*
+** st3q_u32_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u32_m3, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0 - svcntw () * 3, z0),
+ svst3q (p0, x0 - svcntw () * 3, z0))
+
+/*
+** st3q_u32_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u32_m24, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0 - svcntw () * 24, z0),
+ svst3q (p0, x0 - svcntw () * 24, z0))
+
+/*
+** st3q_u32_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u32_m27, svuint32x3_t, uint32_t,
+ svst3q_u32 (p0, x0 - svcntw () * 27, z0),
+ svst3q (p0, x0 - svcntw () * 27, z0))
+
+/*
+** st3q_vnum_u32_0:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u32_0, svuint32x3_t, uint32_t,
+ svst3q_vnum_u32 (p0, x0, 0, z0),
+ svst3q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u32_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u32_1, svuint32x3_t, uint32_t,
+ svst3q_vnum_u32 (p0, x0, 1, z0),
+ svst3q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u32_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u32_2, svuint32x3_t, uint32_t,
+ svst3q_vnum_u32 (p0, x0, 2, z0),
+ svst3q_vnum (p0, x0, 2, z0))
+
+/*
+** st3q_vnum_u32_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u32_3, svuint32x3_t, uint32_t,
+ svst3q_vnum_u32 (p0, x0, 3, z0),
+ svst3q_vnum (p0, x0, 3, z0))
+
+/*
+** st3q_vnum_u32_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u32_21, svuint32x3_t, uint32_t,
+ svst3q_vnum_u32 (p0, x0, 21, z0),
+ svst3q_vnum (p0, x0, 21, z0))
+
+/*
+** st3q_vnum_u32_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u32_24, svuint32x3_t, uint32_t,
+ svst3q_vnum_u32 (p0, x0, 24, z0),
+ svst3q_vnum (p0, x0, 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u32_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u32_m1, svuint32x3_t, uint32_t,
+ svst3q_vnum_u32 (p0, x0, -1, z0),
+ svst3q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u32_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u32_m2, svuint32x3_t, uint32_t,
+ svst3q_vnum_u32 (p0, x0, -2, z0),
+ svst3q_vnum (p0, x0, -2, z0))
+
+/*
+** st3q_vnum_u32_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u32_m3, svuint32x3_t, uint32_t,
+ svst3q_vnum_u32 (p0, x0, -3, z0),
+ svst3q_vnum (p0, x0, -3, z0))
+
+/*
+** st3q_vnum_u32_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u32_m24, svuint32x3_t, uint32_t,
+ svst3q_vnum_u32 (p0, x0, -24, z0),
+ svst3q_vnum (p0, x0, -24, z0))
+
+/*
+** st3q_vnum_u32_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u32_m27, svuint32x3_t, uint32_t,
+ svst3q_vnum_u32 (p0, x0, -27, z0),
+ svst3q_vnum (p0, x0, -27, z0))
+
+/*
+** st3q_vnum_u32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st3q {z0\.q - z2\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st3q {z0\.q - z2\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st3q_vnum_u32_x1, svuint32x3_t, uint32_t,
+ svst3q_vnum_u32 (p0, x0, x1, z0),
+ svst3q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u64.c
new file mode 100644
index 0000000..52fdc78
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u64.c
@@ -0,0 +1,261 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st3q_u64_base:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u64_base, svuint64x3_t, uint64_t,
+ svst3q_u64 (p0, x0, z0),
+ svst3q (p0, x0, z0))
+
+/*
+** st3q_u64_index:
+** add (x[0-9]), x0, x1, lsl #?3
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u64_index, svuint64x3_t, uint64_t,
+ svst3q_u64 (p0, x0 + x1, z0),
+ svst3q (p0, x0 + x1, z0))
+
+/*
+** st3q_u64_index2:
+** st3q {z0\.q - z2\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st3q_u64_index2, svuint64x3_t, uint64_t,
+ svst3q_u64 (p0, x0 + x1 * 2, z0),
+ svst3q (p0, x0 + x1 * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u64_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u64_1, svuint64x3_t, uint64_t,
+ svst3q_u64 (p0, x0 + svcntd (), z0),
+ svst3q (p0, x0 + svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u64_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u64_2, svuint64x3_t, uint64_t,
+ svst3q_u64 (p0, x0 + svcntd () * 2, z0),
+ svst3q (p0, x0 + svcntd () * 2, z0))
+
+/*
+** st3q_u64_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u64_3, svuint64x3_t, uint64_t,
+ svst3q_u64 (p0, x0 + svcntd () * 3, z0),
+ svst3q (p0, x0 + svcntd () * 3, z0))
+
+/*
+** st3q_u64_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u64_21, svuint64x3_t, uint64_t,
+ svst3q_u64 (p0, x0 + svcntd () * 21, z0),
+ svst3q (p0, x0 + svcntd () * 21, z0))
+
+/*
+** st3q_u64_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u64_24, svuint64x3_t, uint64_t,
+ svst3q_u64 (p0, x0 + svcntd () * 24, z0),
+ svst3q (p0, x0 + svcntd () * 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u64_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u64_m1, svuint64x3_t, uint64_t,
+ svst3q_u64 (p0, x0 - svcntd (), z0),
+ svst3q (p0, x0 - svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u64_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u64_m2, svuint64x3_t, uint64_t,
+ svst3q_u64 (p0, x0 - svcntd () * 2, z0),
+ svst3q (p0, x0 - svcntd () * 2, z0))
+
+/*
+** st3q_u64_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u64_m3, svuint64x3_t, uint64_t,
+ svst3q_u64 (p0, x0 - svcntd () * 3, z0),
+ svst3q (p0, x0 - svcntd () * 3, z0))
+
+/*
+** st3q_u64_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u64_m24, svuint64x3_t, uint64_t,
+ svst3q_u64 (p0, x0 - svcntd () * 24, z0),
+ svst3q (p0, x0 - svcntd () * 24, z0))
+
+/*
+** st3q_u64_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u64_m27, svuint64x3_t, uint64_t,
+ svst3q_u64 (p0, x0 - svcntd () * 27, z0),
+ svst3q (p0, x0 - svcntd () * 27, z0))
+
+/*
+** st3q_vnum_u64_0:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u64_0, svuint64x3_t, uint64_t,
+ svst3q_vnum_u64 (p0, x0, 0, z0),
+ svst3q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u64_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u64_1, svuint64x3_t, uint64_t,
+ svst3q_vnum_u64 (p0, x0, 1, z0),
+ svst3q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u64_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u64_2, svuint64x3_t, uint64_t,
+ svst3q_vnum_u64 (p0, x0, 2, z0),
+ svst3q_vnum (p0, x0, 2, z0))
+
+/*
+** st3q_vnum_u64_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u64_3, svuint64x3_t, uint64_t,
+ svst3q_vnum_u64 (p0, x0, 3, z0),
+ svst3q_vnum (p0, x0, 3, z0))
+
+/*
+** st3q_vnum_u64_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u64_21, svuint64x3_t, uint64_t,
+ svst3q_vnum_u64 (p0, x0, 21, z0),
+ svst3q_vnum (p0, x0, 21, z0))
+
+/*
+** st3q_vnum_u64_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u64_24, svuint64x3_t, uint64_t,
+ svst3q_vnum_u64 (p0, x0, 24, z0),
+ svst3q_vnum (p0, x0, 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u64_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u64_m1, svuint64x3_t, uint64_t,
+ svst3q_vnum_u64 (p0, x0, -1, z0),
+ svst3q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u64_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u64_m2, svuint64x3_t, uint64_t,
+ svst3q_vnum_u64 (p0, x0, -2, z0),
+ svst3q_vnum (p0, x0, -2, z0))
+
+/*
+** st3q_vnum_u64_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u64_m3, svuint64x3_t, uint64_t,
+ svst3q_vnum_u64 (p0, x0, -3, z0),
+ svst3q_vnum (p0, x0, -3, z0))
+
+/*
+** st3q_vnum_u64_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u64_m24, svuint64x3_t, uint64_t,
+ svst3q_vnum_u64 (p0, x0, -24, z0),
+ svst3q_vnum (p0, x0, -24, z0))
+
+/*
+** st3q_vnum_u64_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u64_m27, svuint64x3_t, uint64_t,
+ svst3q_vnum_u64 (p0, x0, -27, z0),
+ svst3q_vnum (p0, x0, -27, z0))
+
+/*
+** st3q_vnum_u64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st3q {z0\.q - z2\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st3q {z0\.q - z2\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st3q_vnum_u64_x1, svuint64x3_t, uint64_t,
+ svst3q_vnum_u64 (p0, x0, x1, z0),
+ svst3q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u8.c
new file mode 100644
index 0000000..9f75a24
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st3q_u8.c
@@ -0,0 +1,291 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st3q_u8_base:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u8_base, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0, z0),
+ svst3q (p0, x0, z0))
+
+/*
+** st3q_u8_index:
+** add (x[0-9]), (?:x0, x1|x1, x0)
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u8_index, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 + x1, z0),
+ svst3q (p0, x0 + x1, z0))
+
+/*
+** st3q_u8_index2:
+** add (x[0-9]), x0, x1, lsl #?1
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u8_index2, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 + x1 * 2, z0),
+ svst3q (p0, x0 + x1 * 2, z0))
+
+/*
+** st3q_u8_index4:
+** add (x[0-9]), x0, x1, lsl #?2
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u8_index4, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 + x1 * 4, z0),
+ svst3q (p0, x0 + x1 * 4, z0))
+
+/*
+** st3q_u8_index8:
+** add (x[0-9]), x0, x1, lsl #?3
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u8_index8, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 + x1 * 8, z0),
+ svst3q (p0, x0 + x1 * 8, z0))
+
+/*
+** st3q_u8_index16:
+** st3q {z0\.q - z2\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st3q_u8_index16, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 + x1 * 16, z0),
+ svst3q (p0, x0 + x1 * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u8_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u8_1, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 + svcntb (), z0),
+ svst3q (p0, x0 + svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u8_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u8_2, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 + svcntb () * 2, z0),
+ svst3q (p0, x0 + svcntb () * 2, z0))
+
+/*
+** st3q_u8_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u8_3, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 + svcntb () * 3, z0),
+ svst3q (p0, x0 + svcntb () * 3, z0))
+
+/*
+** st3q_u8_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u8_21, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 + svcntb () * 21, z0),
+ svst3q (p0, x0 + svcntb () * 21, z0))
+
+/*
+** st3q_u8_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u8_24, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 + svcntb () * 24, z0),
+ svst3q (p0, x0 + svcntb () * 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u8_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u8_m1, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 - svcntb (), z0),
+ svst3q (p0, x0 - svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_u8_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_u8_m2, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 - svcntb () * 2, z0),
+ svst3q (p0, x0 - svcntb () * 2, z0))
+
+/*
+** st3q_u8_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u8_m3, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 - svcntb () * 3, z0),
+ svst3q (p0, x0 - svcntb () * 3, z0))
+
+/*
+** st3q_u8_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_u8_m24, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 - svcntb () * 24, z0),
+ svst3q (p0, x0 - svcntb () * 24, z0))
+
+/*
+** st3q_u8_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_u8_m27, svuint8x3_t, uint8_t,
+ svst3q_u8 (p0, x0 - svcntb () * 27, z0),
+ svst3q (p0, x0 - svcntb () * 27, z0))
+
+/*
+** st3q_vnum_u8_0:
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u8_0, svuint8x3_t, uint8_t,
+ svst3q_vnum_u8 (p0, x0, 0, z0),
+ svst3q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u8_1:
+** incb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u8_1, svuint8x3_t, uint8_t,
+ svst3q_vnum_u8 (p0, x0, 1, z0),
+ svst3q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u8_2:
+** incb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u8_2, svuint8x3_t, uint8_t,
+ svst3q_vnum_u8 (p0, x0, 2, z0),
+ svst3q_vnum (p0, x0, 2, z0))
+
+/*
+** st3q_vnum_u8_3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u8_3, svuint8x3_t, uint8_t,
+ svst3q_vnum_u8 (p0, x0, 3, z0),
+ svst3q_vnum (p0, x0, 3, z0))
+
+/*
+** st3q_vnum_u8_21:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #21, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u8_21, svuint8x3_t, uint8_t,
+ svst3q_vnum_u8 (p0, x0, 21, z0),
+ svst3q_vnum (p0, x0, 21, z0))
+
+/*
+** st3q_vnum_u8_24:
+** addvl (x[0-9]+), x0, #24
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u8_24, svuint8x3_t, uint8_t,
+ svst3q_vnum_u8 (p0, x0, 24, z0),
+ svst3q_vnum (p0, x0, 24, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u8_m1:
+** decb x0
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u8_m1, svuint8x3_t, uint8_t,
+ svst3q_vnum_u8 (p0, x0, -1, z0),
+ svst3q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st3q_vnum_u8_m2:
+** decb x0, all, mul #2
+** st3q {z0\.q - z2\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u8_m2, svuint8x3_t, uint8_t,
+ svst3q_vnum_u8 (p0, x0, -2, z0),
+ svst3q_vnum (p0, x0, -2, z0))
+
+/*
+** st3q_vnum_u8_m3:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-3, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u8_m3, svuint8x3_t, uint8_t,
+ svst3q_vnum_u8 (p0, x0, -3, z0),
+ svst3q_vnum (p0, x0, -3, z0))
+
+/*
+** st3q_vnum_u8_m24:
+** st3q {z0\.q - z2\.q}, p0, \[x0, #-24, mul vl\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u8_m24, svuint8x3_t, uint8_t,
+ svst3q_vnum_u8 (p0, x0, -24, z0),
+ svst3q_vnum (p0, x0, -24, z0))
+
+/*
+** st3q_vnum_u8_m27:
+** addvl (x[0-9]+), x0, #-27
+** st3q {z0\.q - z2\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st3q_vnum_u8_m27, svuint8x3_t, uint8_t,
+ svst3q_vnum_u8 (p0, x0, -27, z0),
+ svst3q_vnum (p0, x0, -27, z0))
+
+/*
+** st3q_vnum_u8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st3q {z0\.q - z2\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st3q {z0\.q - z2\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st3q_vnum_u8_x1, svuint8x3_t, uint8_t,
+ svst3q_vnum_u8 (p0, x0, x1, z0),
+ svst3q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_bf16.c
new file mode 100644
index 0000000..e3f9c95
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_bf16.c
@@ -0,0 +1,325 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st4q_bf16_base:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_bf16_base, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0, z0),
+ svst4q (p0, x0, z0))
+
+/*
+** st4q_bf16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_bf16_index, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 + x1, z0),
+ svst4q (p0, x0 + x1, z0))
+
+/*
+** st4q_bf16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_bf16_index2, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 + x1 * 2, z0),
+ svst4q (p0, x0 + x1 * 2, z0))
+
+/*
+** st4q_bf16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_bf16_index4, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 + x1 * 4, z0),
+ svst4q (p0, x0 + x1 * 4, z0))
+
+/*
+** st4q_bf16_index8:
+** st4q {z0\.q - z3\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st4q_bf16_index8, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 + x1 * 8, z0),
+ svst4q (p0, x0 + x1 * 8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_bf16_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_bf16_1, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 + svcnth (), z0),
+ svst4q (p0, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_bf16_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_bf16_2, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 + svcnth () * 2, z0),
+ svst4q (p0, x0 + svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_bf16_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_bf16_3, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 + svcnth () * 3, z0),
+ svst4q (p0, x0 + svcnth () * 3, z0))
+
+/*
+** st4q_bf16_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_bf16_4, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 + svcnth () * 4, z0),
+ svst4q (p0, x0 + svcnth () * 4, z0))
+
+/*
+** st4q_bf16_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_bf16_28, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 + svcnth () * 28, z0),
+ svst4q (p0, x0 + svcnth () * 28, z0))
+
+/*
+** st4q_bf16_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_bf16_32, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 + svcnth () * 32, z0),
+ svst4q (p0, x0 + svcnth () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_bf16_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_bf16_m1, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 - svcnth (), z0),
+ svst4q (p0, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_bf16_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_bf16_m2, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 - svcnth () * 2, z0),
+ svst4q (p0, x0 - svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_bf16_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_bf16_m3, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 - svcnth () * 3, z0),
+ svst4q (p0, x0 - svcnth () * 3, z0))
+
+/*
+** st4q_bf16_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_bf16_m4, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 - svcnth () * 4, z0),
+ svst4q (p0, x0 - svcnth () * 4, z0))
+
+/*
+** st4q_bf16_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_bf16_m32, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 - svcnth () * 32, z0),
+ svst4q (p0, x0 - svcnth () * 32, z0))
+
+/*
+** st4q_bf16_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_bf16_m36, svbfloat16x4_t, bfloat16_t,
+ svst4q_bf16 (p0, x0 - svcnth () * 36, z0),
+ svst4q (p0, x0 - svcnth () * 36, z0))
+
+/*
+** st4q_vnum_bf16_0:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_0, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, 0, z0),
+ svst4q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_bf16_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_1, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, 1, z0),
+ svst4q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_bf16_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_2, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, 2, z0),
+ svst4q_vnum (p0, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_bf16_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_3, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, 3, z0),
+ svst4q_vnum (p0, x0, 3, z0))
+
+/*
+** st4q_vnum_bf16_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_4, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, 4, z0),
+ svst4q_vnum (p0, x0, 4, z0))
+
+/*
+** st4q_vnum_bf16_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_28, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, 28, z0),
+ svst4q_vnum (p0, x0, 28, z0))
+
+/*
+** st4q_vnum_bf16_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_32, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, 32, z0),
+ svst4q_vnum (p0, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_bf16_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_m1, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, -1, z0),
+ svst4q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_bf16_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_m2, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, -2, z0),
+ svst4q_vnum (p0, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_bf16_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_m3, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, -3, z0),
+ svst4q_vnum (p0, x0, -3, z0))
+
+/*
+** st4q_vnum_bf16_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_m4, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, -4, z0),
+ svst4q_vnum (p0, x0, -4, z0))
+
+/*
+** st4q_vnum_bf16_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_m32, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, -32, z0),
+ svst4q_vnum (p0, x0, -32, z0))
+
+/*
+** st4q_vnum_bf16_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_m36, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, -36, z0),
+ svst4q_vnum (p0, x0, -36, z0))
+
+/*
+** st4q_vnum_bf16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st4q {z0\.q - z3\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st4q {z0\.q - z3\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st4q_vnum_bf16_x1, svbfloat16x4_t, bfloat16_t,
+ svst4q_vnum_bf16 (p0, x0, x1, z0),
+ svst4q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_f16.c
new file mode 100644
index 0000000..7ef9d37
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_f16.c
@@ -0,0 +1,325 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st4q_f16_base:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f16_base, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0, z0),
+ svst4q (p0, x0, z0))
+
+/*
+** st4q_f16_index:
+** add (x[0-9]+), x0, x1, lsl #?1
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_f16_index, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 + x1, z0),
+ svst4q (p0, x0 + x1, z0))
+
+/*
+** st4q_f16_index2:
+** add (x[0-9]+), x0, x1, lsl #?2
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_f16_index2, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 + x1 * 2, z0),
+ svst4q (p0, x0 + x1 * 2, z0))
+
+/*
+** st4q_f16_index4:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_f16_index4, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 + x1 * 4, z0),
+ svst4q (p0, x0 + x1 * 4, z0))
+
+/*
+** st4q_f16_index8:
+** st4q {z0\.q - z3\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st4q_f16_index8, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 + x1 * 8, z0),
+ svst4q (p0, x0 + x1 * 8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f16_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f16_1, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 + svcnth (), z0),
+ svst4q (p0, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f16_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f16_2, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 + svcnth () * 2, z0),
+ svst4q (p0, x0 + svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f16_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f16_3, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 + svcnth () * 3, z0),
+ svst4q (p0, x0 + svcnth () * 3, z0))
+
+/*
+** st4q_f16_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_f16_4, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 + svcnth () * 4, z0),
+ svst4q (p0, x0 + svcnth () * 4, z0))
+
+/*
+** st4q_f16_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_f16_28, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 + svcnth () * 28, z0),
+ svst4q (p0, x0 + svcnth () * 28, z0))
+
+/*
+** st4q_f16_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_f16_32, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 + svcnth () * 32, z0),
+ svst4q (p0, x0 + svcnth () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f16_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f16_m1, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 - svcnth (), z0),
+ svst4q (p0, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f16_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f16_m2, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 - svcnth () * 2, z0),
+ svst4q (p0, x0 - svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f16_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f16_m3, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 - svcnth () * 3, z0),
+ svst4q (p0, x0 - svcnth () * 3, z0))
+
+/*
+** st4q_f16_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_f16_m4, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 - svcnth () * 4, z0),
+ svst4q (p0, x0 - svcnth () * 4, z0))
+
+/*
+** st4q_f16_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_f16_m32, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 - svcnth () * 32, z0),
+ svst4q (p0, x0 - svcnth () * 32, z0))
+
+/*
+** st4q_f16_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_f16_m36, svfloat16x4_t, float16_t,
+ svst4q_f16 (p0, x0 - svcnth () * 36, z0),
+ svst4q (p0, x0 - svcnth () * 36, z0))
+
+/*
+** st4q_vnum_f16_0:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_0, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, 0, z0),
+ svst4q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f16_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_1, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, 1, z0),
+ svst4q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f16_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_2, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, 2, z0),
+ svst4q_vnum (p0, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f16_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_3, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, 3, z0),
+ svst4q_vnum (p0, x0, 3, z0))
+
+/*
+** st4q_vnum_f16_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_4, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, 4, z0),
+ svst4q_vnum (p0, x0, 4, z0))
+
+/*
+** st4q_vnum_f16_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_28, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, 28, z0),
+ svst4q_vnum (p0, x0, 28, z0))
+
+/*
+** st4q_vnum_f16_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_32, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, 32, z0),
+ svst4q_vnum (p0, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f16_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_m1, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, -1, z0),
+ svst4q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f16_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_m2, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, -2, z0),
+ svst4q_vnum (p0, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f16_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_m3, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, -3, z0),
+ svst4q_vnum (p0, x0, -3, z0))
+
+/*
+** st4q_vnum_f16_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_m4, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, -4, z0),
+ svst4q_vnum (p0, x0, -4, z0))
+
+/*
+** st4q_vnum_f16_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_m32, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, -32, z0),
+ svst4q_vnum (p0, x0, -32, z0))
+
+/*
+** st4q_vnum_f16_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_m36, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, -36, z0),
+ svst4q_vnum (p0, x0, -36, z0))
+
+/*
+** st4q_vnum_f16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st4q {z0\.q - z3\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st4q {z0\.q - z3\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st4q_vnum_f16_x1, svfloat16x4_t, float16_t,
+ svst4q_vnum_f16 (p0, x0, x1, z0),
+ svst4q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_f32.c
new file mode 100644
index 0000000..30dc35c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_f32.c
@@ -0,0 +1,315 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st4q_f32_base:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f32_base, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0, z0),
+ svst4q (p0, x0, z0))
+
+/*
+** st4q_f32_index:
+** add (x[0-9]+), x0, x1, lsl #?2
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_f32_index, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 + x1, z0),
+ svst4q (p0, x0 + x1, z0))
+
+/*
+** st4q_f32_index2:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_f32_index2, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 + x1 * 2, z0),
+ svst4q (p0, x0 + x1 * 2, z0))
+
+/*
+** st4q_f32_index4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st4q_f32_index4, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 + x1 * 4, z0),
+ svst4q (p0, x0 + x1 * 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f32_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f32_1, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 + svcntw (), z0),
+ svst4q (p0, x0 + svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f32_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f32_2, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 + svcntw () * 2, z0),
+ svst4q (p0, x0 + svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f32_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f32_3, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 + svcntw () * 3, z0),
+ svst4q (p0, x0 + svcntw () * 3, z0))
+
+/*
+** st4q_f32_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_f32_4, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 + svcntw () * 4, z0),
+ svst4q (p0, x0 + svcntw () * 4, z0))
+
+/*
+** st4q_f32_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_f32_28, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 + svcntw () * 28, z0),
+ svst4q (p0, x0 + svcntw () * 28, z0))
+
+/*
+** st4q_f32_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_f32_32, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 + svcntw () * 32, z0),
+ svst4q (p0, x0 + svcntw () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f32_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f32_m1, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 - svcntw (), z0),
+ svst4q (p0, x0 - svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f32_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f32_m2, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 - svcntw () * 2, z0),
+ svst4q (p0, x0 - svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f32_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f32_m3, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 - svcntw () * 3, z0),
+ svst4q (p0, x0 - svcntw () * 3, z0))
+
+/*
+** st4q_f32_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_f32_m4, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 - svcntw () * 4, z0),
+ svst4q (p0, x0 - svcntw () * 4, z0))
+
+/*
+** st4q_f32_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_f32_m32, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 - svcntw () * 32, z0),
+ svst4q (p0, x0 - svcntw () * 32, z0))
+
+/*
+** st4q_f32_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_f32_m36, svfloat32x4_t, float32_t,
+ svst4q_f32 (p0, x0 - svcntw () * 36, z0),
+ svst4q (p0, x0 - svcntw () * 36, z0))
+
+/*
+** st4q_vnum_f32_0:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_0, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, 0, z0),
+ svst4q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f32_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_1, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, 1, z0),
+ svst4q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f32_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_2, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, 2, z0),
+ svst4q_vnum (p0, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f32_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_3, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, 3, z0),
+ svst4q_vnum (p0, x0, 3, z0))
+
+/*
+** st4q_vnum_f32_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_4, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, 4, z0),
+ svst4q_vnum (p0, x0, 4, z0))
+
+/*
+** st4q_vnum_f32_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_28, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, 28, z0),
+ svst4q_vnum (p0, x0, 28, z0))
+
+/*
+** st4q_vnum_f32_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_32, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, 32, z0),
+ svst4q_vnum (p0, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f32_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_m1, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, -1, z0),
+ svst4q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f32_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_m2, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, -2, z0),
+ svst4q_vnum (p0, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f32_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_m3, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, -3, z0),
+ svst4q_vnum (p0, x0, -3, z0))
+
+/*
+** st4q_vnum_f32_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_m4, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, -4, z0),
+ svst4q_vnum (p0, x0, -4, z0))
+
+/*
+** st4q_vnum_f32_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_m32, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, -32, z0),
+ svst4q_vnum (p0, x0, -32, z0))
+
+/*
+** st4q_vnum_f32_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_m36, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, -36, z0),
+ svst4q_vnum (p0, x0, -36, z0))
+
+/*
+** st4q_vnum_f32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st4q {z0\.q - z3\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st4q {z0\.q - z3\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st4q_vnum_f32_x1, svfloat32x4_t, float32_t,
+ svst4q_vnum_f32 (p0, x0, x1, z0),
+ svst4q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_f64.c
new file mode 100644
index 0000000..9a5a856
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_f64.c
@@ -0,0 +1,305 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st4q_f64_base:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f64_base, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0, z0),
+ svst4q (p0, x0, z0))
+
+/*
+** st4q_f64_index:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_f64_index, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 + x1, z0),
+ svst4q (p0, x0 + x1, z0))
+
+/*
+** st4q_f64_index2:
+** st4q {z0\.q - z3\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st4q_f64_index2, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 + x1 * 2, z0),
+ svst4q (p0, x0 + x1 * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f64_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f64_1, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 + svcntd (), z0),
+ svst4q (p0, x0 + svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f64_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f64_2, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 + svcntd () * 2, z0),
+ svst4q (p0, x0 + svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f64_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f64_3, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 + svcntd () * 3, z0),
+ svst4q (p0, x0 + svcntd () * 3, z0))
+
+/*
+** st4q_f64_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_f64_4, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 + svcntd () * 4, z0),
+ svst4q (p0, x0 + svcntd () * 4, z0))
+
+/*
+** st4q_f64_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_f64_28, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 + svcntd () * 28, z0),
+ svst4q (p0, x0 + svcntd () * 28, z0))
+
+/*
+** st4q_f64_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_f64_32, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 + svcntd () * 32, z0),
+ svst4q (p0, x0 + svcntd () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f64_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f64_m1, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 - svcntd (), z0),
+ svst4q (p0, x0 - svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f64_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f64_m2, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 - svcntd () * 2, z0),
+ svst4q (p0, x0 - svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_f64_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_f64_m3, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 - svcntd () * 3, z0),
+ svst4q (p0, x0 - svcntd () * 3, z0))
+
+/*
+** st4q_f64_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_f64_m4, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 - svcntd () * 4, z0),
+ svst4q (p0, x0 - svcntd () * 4, z0))
+
+/*
+** st4q_f64_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_f64_m32, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 - svcntd () * 32, z0),
+ svst4q (p0, x0 - svcntd () * 32, z0))
+
+/*
+** st4q_f64_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_f64_m36, svfloat64x4_t, float64_t,
+ svst4q_f64 (p0, x0 - svcntd () * 36, z0),
+ svst4q (p0, x0 - svcntd () * 36, z0))
+
+/*
+** st4q_vnum_f64_0:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_0, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, 0, z0),
+ svst4q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f64_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_1, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, 1, z0),
+ svst4q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f64_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_2, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, 2, z0),
+ svst4q_vnum (p0, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f64_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_3, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, 3, z0),
+ svst4q_vnum (p0, x0, 3, z0))
+
+/*
+** st4q_vnum_f64_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_4, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, 4, z0),
+ svst4q_vnum (p0, x0, 4, z0))
+
+/*
+** st4q_vnum_f64_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_28, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, 28, z0),
+ svst4q_vnum (p0, x0, 28, z0))
+
+/*
+** st4q_vnum_f64_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_32, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, 32, z0),
+ svst4q_vnum (p0, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f64_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_m1, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, -1, z0),
+ svst4q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f64_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_m2, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, -2, z0),
+ svst4q_vnum (p0, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_f64_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_m3, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, -3, z0),
+ svst4q_vnum (p0, x0, -3, z0))
+
+/*
+** st4q_vnum_f64_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_m4, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, -4, z0),
+ svst4q_vnum (p0, x0, -4, z0))
+
+/*
+** st4q_vnum_f64_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_m32, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, -32, z0),
+ svst4q_vnum (p0, x0, -32, z0))
+
+/*
+** st4q_vnum_f64_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_m36, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, -36, z0),
+ svst4q_vnum (p0, x0, -36, z0))
+
+/*
+** st4q_vnum_f64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st4q {z0\.q - z3\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st4q {z0\.q - z3\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st4q_vnum_f64_x1, svfloat64x4_t, float64_t,
+ svst4q_vnum_f64 (p0, x0, x1, z0),
+ svst4q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s16.c
new file mode 100644
index 0000000..6fab252
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s16.c
@@ -0,0 +1,325 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st4q_s16_base:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s16_base, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0, z0),
+ svst4q (p0, x0, z0))
+
+/*
+** st4q_s16_index:
+** add (x[0-9]+), x0, x1, lsl #?1
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_s16_index, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 + x1, z0),
+ svst4q (p0, x0 + x1, z0))
+
+/*
+** st4q_s16_index2:
+** add (x[0-9]+), x0, x1, lsl #?2
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_s16_index2, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 + x1 * 2, z0),
+ svst4q (p0, x0 + x1 * 2, z0))
+
+/*
+** st4q_s16_index4:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_s16_index4, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 + x1 * 4, z0),
+ svst4q (p0, x0 + x1 * 4, z0))
+
+/*
+** st4q_s16_index8:
+** st4q {z0\.q - z3\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st4q_s16_index8, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 + x1 * 8, z0),
+ svst4q (p0, x0 + x1 * 8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s16_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s16_1, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 + svcnth (), z0),
+ svst4q (p0, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s16_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s16_2, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 + svcnth () * 2, z0),
+ svst4q (p0, x0 + svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s16_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s16_3, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 + svcnth () * 3, z0),
+ svst4q (p0, x0 + svcnth () * 3, z0))
+
+/*
+** st4q_s16_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s16_4, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 + svcnth () * 4, z0),
+ svst4q (p0, x0 + svcnth () * 4, z0))
+
+/*
+** st4q_s16_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s16_28, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 + svcnth () * 28, z0),
+ svst4q (p0, x0 + svcnth () * 28, z0))
+
+/*
+** st4q_s16_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_s16_32, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 + svcnth () * 32, z0),
+ svst4q (p0, x0 + svcnth () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s16_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s16_m1, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 - svcnth (), z0),
+ svst4q (p0, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s16_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s16_m2, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 - svcnth () * 2, z0),
+ svst4q (p0, x0 - svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s16_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s16_m3, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 - svcnth () * 3, z0),
+ svst4q (p0, x0 - svcnth () * 3, z0))
+
+/*
+** st4q_s16_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s16_m4, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 - svcnth () * 4, z0),
+ svst4q (p0, x0 - svcnth () * 4, z0))
+
+/*
+** st4q_s16_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s16_m32, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 - svcnth () * 32, z0),
+ svst4q (p0, x0 - svcnth () * 32, z0))
+
+/*
+** st4q_s16_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_s16_m36, svint16x4_t, int16_t,
+ svst4q_s16 (p0, x0 - svcnth () * 36, z0),
+ svst4q (p0, x0 - svcnth () * 36, z0))
+
+/*
+** st4q_vnum_s16_0:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_0, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, 0, z0),
+ svst4q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s16_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_1, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, 1, z0),
+ svst4q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s16_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_2, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, 2, z0),
+ svst4q_vnum (p0, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s16_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_3, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, 3, z0),
+ svst4q_vnum (p0, x0, 3, z0))
+
+/*
+** st4q_vnum_s16_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_4, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, 4, z0),
+ svst4q_vnum (p0, x0, 4, z0))
+
+/*
+** st4q_vnum_s16_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_28, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, 28, z0),
+ svst4q_vnum (p0, x0, 28, z0))
+
+/*
+** st4q_vnum_s16_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_32, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, 32, z0),
+ svst4q_vnum (p0, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s16_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_m1, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, -1, z0),
+ svst4q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s16_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_m2, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, -2, z0),
+ svst4q_vnum (p0, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s16_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_m3, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, -3, z0),
+ svst4q_vnum (p0, x0, -3, z0))
+
+/*
+** st4q_vnum_s16_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_m4, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, -4, z0),
+ svst4q_vnum (p0, x0, -4, z0))
+
+/*
+** st4q_vnum_s16_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_m32, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, -32, z0),
+ svst4q_vnum (p0, x0, -32, z0))
+
+/*
+** st4q_vnum_s16_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_m36, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, -36, z0),
+ svst4q_vnum (p0, x0, -36, z0))
+
+/*
+** st4q_vnum_s16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st4q {z0\.q - z3\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st4q {z0\.q - z3\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st4q_vnum_s16_x1, svint16x4_t, int16_t,
+ svst4q_vnum_s16 (p0, x0, x1, z0),
+ svst4q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s32.c
new file mode 100644
index 0000000..02ced68
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s32.c
@@ -0,0 +1,315 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st4q_s32_base:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s32_base, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0, z0),
+ svst4q (p0, x0, z0))
+
+/*
+** st4q_s32_index:
+** add (x[0-9]+), x0, x1, lsl #?2
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_s32_index, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 + x1, z0),
+ svst4q (p0, x0 + x1, z0))
+
+/*
+** st4q_s32_index2:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_s32_index2, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 + x1 * 2, z0),
+ svst4q (p0, x0 + x1 * 2, z0))
+
+/*
+** st4q_s32_index4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st4q_s32_index4, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 + x1 * 4, z0),
+ svst4q (p0, x0 + x1 * 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s32_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s32_1, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 + svcntw (), z0),
+ svst4q (p0, x0 + svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s32_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s32_2, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 + svcntw () * 2, z0),
+ svst4q (p0, x0 + svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s32_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s32_3, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 + svcntw () * 3, z0),
+ svst4q (p0, x0 + svcntw () * 3, z0))
+
+/*
+** st4q_s32_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s32_4, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 + svcntw () * 4, z0),
+ svst4q (p0, x0 + svcntw () * 4, z0))
+
+/*
+** st4q_s32_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s32_28, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 + svcntw () * 28, z0),
+ svst4q (p0, x0 + svcntw () * 28, z0))
+
+/*
+** st4q_s32_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_s32_32, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 + svcntw () * 32, z0),
+ svst4q (p0, x0 + svcntw () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s32_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s32_m1, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 - svcntw (), z0),
+ svst4q (p0, x0 - svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s32_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s32_m2, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 - svcntw () * 2, z0),
+ svst4q (p0, x0 - svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s32_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s32_m3, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 - svcntw () * 3, z0),
+ svst4q (p0, x0 - svcntw () * 3, z0))
+
+/*
+** st4q_s32_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s32_m4, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 - svcntw () * 4, z0),
+ svst4q (p0, x0 - svcntw () * 4, z0))
+
+/*
+** st4q_s32_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s32_m32, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 - svcntw () * 32, z0),
+ svst4q (p0, x0 - svcntw () * 32, z0))
+
+/*
+** st4q_s32_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_s32_m36, svint32x4_t, int32_t,
+ svst4q_s32 (p0, x0 - svcntw () * 36, z0),
+ svst4q (p0, x0 - svcntw () * 36, z0))
+
+/*
+** st4q_vnum_s32_0:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_0, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, 0, z0),
+ svst4q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s32_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_1, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, 1, z0),
+ svst4q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s32_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_2, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, 2, z0),
+ svst4q_vnum (p0, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s32_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_3, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, 3, z0),
+ svst4q_vnum (p0, x0, 3, z0))
+
+/*
+** st4q_vnum_s32_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_4, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, 4, z0),
+ svst4q_vnum (p0, x0, 4, z0))
+
+/*
+** st4q_vnum_s32_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_28, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, 28, z0),
+ svst4q_vnum (p0, x0, 28, z0))
+
+/*
+** st4q_vnum_s32_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_32, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, 32, z0),
+ svst4q_vnum (p0, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s32_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_m1, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, -1, z0),
+ svst4q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s32_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_m2, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, -2, z0),
+ svst4q_vnum (p0, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s32_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_m3, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, -3, z0),
+ svst4q_vnum (p0, x0, -3, z0))
+
+/*
+** st4q_vnum_s32_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_m4, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, -4, z0),
+ svst4q_vnum (p0, x0, -4, z0))
+
+/*
+** st4q_vnum_s32_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_m32, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, -32, z0),
+ svst4q_vnum (p0, x0, -32, z0))
+
+/*
+** st4q_vnum_s32_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_m36, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, -36, z0),
+ svst4q_vnum (p0, x0, -36, z0))
+
+/*
+** st4q_vnum_s32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st4q {z0\.q - z3\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st4q {z0\.q - z3\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st4q_vnum_s32_x1, svint32x4_t, int32_t,
+ svst4q_vnum_s32 (p0, x0, x1, z0),
+ svst4q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s64.c
new file mode 100644
index 0000000..1f751d1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s64.c
@@ -0,0 +1,305 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st4q_s64_base:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s64_base, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0, z0),
+ svst4q (p0, x0, z0))
+
+/*
+** st4q_s64_index:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_s64_index, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 + x1, z0),
+ svst4q (p0, x0 + x1, z0))
+
+/*
+** st4q_s64_index2:
+** st4q {z0\.q - z3\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st4q_s64_index2, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 + x1 * 2, z0),
+ svst4q (p0, x0 + x1 * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s64_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s64_1, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 + svcntd (), z0),
+ svst4q (p0, x0 + svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s64_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s64_2, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 + svcntd () * 2, z0),
+ svst4q (p0, x0 + svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s64_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s64_3, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 + svcntd () * 3, z0),
+ svst4q (p0, x0 + svcntd () * 3, z0))
+
+/*
+** st4q_s64_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s64_4, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 + svcntd () * 4, z0),
+ svst4q (p0, x0 + svcntd () * 4, z0))
+
+/*
+** st4q_s64_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s64_28, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 + svcntd () * 28, z0),
+ svst4q (p0, x0 + svcntd () * 28, z0))
+
+/*
+** st4q_s64_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_s64_32, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 + svcntd () * 32, z0),
+ svst4q (p0, x0 + svcntd () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s64_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s64_m1, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 - svcntd (), z0),
+ svst4q (p0, x0 - svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s64_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s64_m2, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 - svcntd () * 2, z0),
+ svst4q (p0, x0 - svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s64_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s64_m3, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 - svcntd () * 3, z0),
+ svst4q (p0, x0 - svcntd () * 3, z0))
+
+/*
+** st4q_s64_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s64_m4, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 - svcntd () * 4, z0),
+ svst4q (p0, x0 - svcntd () * 4, z0))
+
+/*
+** st4q_s64_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s64_m32, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 - svcntd () * 32, z0),
+ svst4q (p0, x0 - svcntd () * 32, z0))
+
+/*
+** st4q_s64_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_s64_m36, svint64x4_t, int64_t,
+ svst4q_s64 (p0, x0 - svcntd () * 36, z0),
+ svst4q (p0, x0 - svcntd () * 36, z0))
+
+/*
+** st4q_vnum_s64_0:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_0, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, 0, z0),
+ svst4q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s64_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_1, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, 1, z0),
+ svst4q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s64_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_2, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, 2, z0),
+ svst4q_vnum (p0, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s64_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_3, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, 3, z0),
+ svst4q_vnum (p0, x0, 3, z0))
+
+/*
+** st4q_vnum_s64_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_4, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, 4, z0),
+ svst4q_vnum (p0, x0, 4, z0))
+
+/*
+** st4q_vnum_s64_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_28, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, 28, z0),
+ svst4q_vnum (p0, x0, 28, z0))
+
+/*
+** st4q_vnum_s64_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_32, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, 32, z0),
+ svst4q_vnum (p0, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s64_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_m1, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, -1, z0),
+ svst4q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s64_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_m2, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, -2, z0),
+ svst4q_vnum (p0, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s64_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_m3, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, -3, z0),
+ svst4q_vnum (p0, x0, -3, z0))
+
+/*
+** st4q_vnum_s64_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_m4, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, -4, z0),
+ svst4q_vnum (p0, x0, -4, z0))
+
+/*
+** st4q_vnum_s64_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_m32, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, -32, z0),
+ svst4q_vnum (p0, x0, -32, z0))
+
+/*
+** st4q_vnum_s64_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_m36, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, -36, z0),
+ svst4q_vnum (p0, x0, -36, z0))
+
+/*
+** st4q_vnum_s64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st4q {z0\.q - z3\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st4q {z0\.q - z3\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st4q_vnum_s64_x1, svint64x4_t, int64_t,
+ svst4q_vnum_s64 (p0, x0, x1, z0),
+ svst4q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s8.c
new file mode 100644
index 0000000..e7e1da7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_s8.c
@@ -0,0 +1,335 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st4q_s8_base:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s8_base, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0, z0),
+ svst4q (p0, x0, z0))
+
+/*
+** st4q_s8_index:
+** add (x[0-9]+), (?:x0, x1|x1, x0)
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_s8_index, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 + x1, z0),
+ svst4q (p0, x0 + x1, z0))
+
+/*
+** st4q_s8_index2:
+** add (x[0-9]+), x0, x1, lsl #?1
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_s8_index2, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 + x1 * 2, z0),
+ svst4q (p0, x0 + x1 * 2, z0))
+
+/*
+** st4q_s8_index4:
+** add (x[0-9]+), x0, x1, lsl #?2
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_s8_index4, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 + x1 * 4, z0),
+ svst4q (p0, x0 + x1 * 4, z0))
+
+/*
+** st4q_s8_index8:
+** add (x[0-9]+), x0, x1, lsl #?3
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_s8_index8, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 + x1 * 8, z0),
+ svst4q (p0, x0 + x1 * 8, z0))
+
+/*
+** st4q_s8_index16:
+** st4q {z0\.q - z3\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st4q_s8_index16, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 + x1 * 16, z0),
+ svst4q (p0, x0 + x1 * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s8_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s8_1, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 + svcntb (), z0),
+ svst4q (p0, x0 + svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s8_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s8_2, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 + svcntb () * 2, z0),
+ svst4q (p0, x0 + svcntb () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s8_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s8_3, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 + svcntb () * 3, z0),
+ svst4q (p0, x0 + svcntb () * 3, z0))
+
+/*
+** st4q_s8_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s8_4, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 + svcntb () * 4, z0),
+ svst4q (p0, x0 + svcntb () * 4, z0))
+
+/*
+** st4q_s8_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s8_28, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 + svcntb () * 28, z0),
+ svst4q (p0, x0 + svcntb () * 28, z0))
+
+/*
+** st4q_s8_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_s8_32, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 + svcntb () * 32, z0),
+ svst4q (p0, x0 + svcntb () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s8_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s8_m1, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 - svcntb (), z0),
+ svst4q (p0, x0 - svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s8_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s8_m2, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 - svcntb () * 2, z0),
+ svst4q (p0, x0 - svcntb () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_s8_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_s8_m3, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 - svcntb () * 3, z0),
+ svst4q (p0, x0 - svcntb () * 3, z0))
+
+/*
+** st4q_s8_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s8_m4, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 - svcntb () * 4, z0),
+ svst4q (p0, x0 - svcntb () * 4, z0))
+
+/*
+** st4q_s8_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_s8_m32, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 - svcntb () * 32, z0),
+ svst4q (p0, x0 - svcntb () * 32, z0))
+
+/*
+** st4q_s8_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_s8_m36, svint8x4_t, int8_t,
+ svst4q_s8 (p0, x0 - svcntb () * 36, z0),
+ svst4q (p0, x0 - svcntb () * 36, z0))
+
+/*
+** st4q_vnum_s8_0:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_0, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, 0, z0),
+ svst4q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s8_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_1, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, 1, z0),
+ svst4q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s8_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_2, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, 2, z0),
+ svst4q_vnum (p0, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s8_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_3, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, 3, z0),
+ svst4q_vnum (p0, x0, 3, z0))
+
+/*
+** st4q_vnum_s8_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_4, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, 4, z0),
+ svst4q_vnum (p0, x0, 4, z0))
+
+/*
+** st4q_vnum_s8_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_28, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, 28, z0),
+ svst4q_vnum (p0, x0, 28, z0))
+
+/*
+** st4q_vnum_s8_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_32, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, 32, z0),
+ svst4q_vnum (p0, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s8_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_m1, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, -1, z0),
+ svst4q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s8_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_m2, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, -2, z0),
+ svst4q_vnum (p0, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_s8_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_m3, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, -3, z0),
+ svst4q_vnum (p0, x0, -3, z0))
+
+/*
+** st4q_vnum_s8_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_m4, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, -4, z0),
+ svst4q_vnum (p0, x0, -4, z0))
+
+/*
+** st4q_vnum_s8_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_m32, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, -32, z0),
+ svst4q_vnum (p0, x0, -32, z0))
+
+/*
+** st4q_vnum_s8_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_m36, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, -36, z0),
+ svst4q_vnum (p0, x0, -36, z0))
+
+/*
+** st4q_vnum_s8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st4q {z0\.q - z3\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st4q {z0\.q - z3\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st4q_vnum_s8_x1, svint8x4_t, int8_t,
+ svst4q_vnum_s8 (p0, x0, x1, z0),
+ svst4q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u16.c
new file mode 100644
index 0000000..99c7c118a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u16.c
@@ -0,0 +1,325 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st4q_u16_base:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u16_base, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0, z0),
+ svst4q (p0, x0, z0))
+
+/*
+** st4q_u16_index:
+** add (x[0-9]), x0, x1, lsl #?1
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_u16_index, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 + x1, z0),
+ svst4q (p0, x0 + x1, z0))
+
+/*
+** st4q_u16_index2:
+** add (x[0-9]), x0, x1, lsl #?2
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_u16_index2, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 + x1 * 2, z0),
+ svst4q (p0, x0 + x1 * 2, z0))
+
+/*
+** st4q_u16_index4:
+** add (x[0-9]), x0, x1, lsl #?3
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_u16_index4, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 + x1 * 4, z0),
+ svst4q (p0, x0 + x1 * 4, z0))
+
+/*
+** st4q_u16_index8:
+** st4q {z0\.q - z3\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st4q_u16_index8, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 + x1 * 8, z0),
+ svst4q (p0, x0 + x1 * 8, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u16_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u16_1, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 + svcnth (), z0),
+ svst4q (p0, x0 + svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u16_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u16_2, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 + svcnth () * 2, z0),
+ svst4q (p0, x0 + svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u16_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u16_3, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 + svcnth () * 3, z0),
+ svst4q (p0, x0 + svcnth () * 3, z0))
+
+/*
+** st4q_u16_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u16_4, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 + svcnth () * 4, z0),
+ svst4q (p0, x0 + svcnth () * 4, z0))
+
+/*
+** st4q_u16_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u16_28, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 + svcnth () * 28, z0),
+ svst4q (p0, x0 + svcnth () * 28, z0))
+
+/*
+** st4q_u16_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_u16_32, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 + svcnth () * 32, z0),
+ svst4q (p0, x0 + svcnth () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u16_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u16_m1, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 - svcnth (), z0),
+ svst4q (p0, x0 - svcnth (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u16_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u16_m2, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 - svcnth () * 2, z0),
+ svst4q (p0, x0 - svcnth () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u16_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u16_m3, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 - svcnth () * 3, z0),
+ svst4q (p0, x0 - svcnth () * 3, z0))
+
+/*
+** st4q_u16_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u16_m4, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 - svcnth () * 4, z0),
+ svst4q (p0, x0 - svcnth () * 4, z0))
+
+/*
+** st4q_u16_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u16_m32, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 - svcnth () * 32, z0),
+ svst4q (p0, x0 - svcnth () * 32, z0))
+
+/*
+** st4q_u16_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_u16_m36, svuint16x4_t, uint16_t,
+ svst4q_u16 (p0, x0 - svcnth () * 36, z0),
+ svst4q (p0, x0 - svcnth () * 36, z0))
+
+/*
+** st4q_vnum_u16_0:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_0, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, 0, z0),
+ svst4q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u16_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_1, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, 1, z0),
+ svst4q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u16_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_2, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, 2, z0),
+ svst4q_vnum (p0, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u16_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_3, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, 3, z0),
+ svst4q_vnum (p0, x0, 3, z0))
+
+/*
+** st4q_vnum_u16_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_4, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, 4, z0),
+ svst4q_vnum (p0, x0, 4, z0))
+
+/*
+** st4q_vnum_u16_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_28, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, 28, z0),
+ svst4q_vnum (p0, x0, 28, z0))
+
+/*
+** st4q_vnum_u16_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_32, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, 32, z0),
+ svst4q_vnum (p0, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u16_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_m1, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, -1, z0),
+ svst4q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u16_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_m2, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, -2, z0),
+ svst4q_vnum (p0, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u16_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_m3, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, -3, z0),
+ svst4q_vnum (p0, x0, -3, z0))
+
+/*
+** st4q_vnum_u16_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_m4, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, -4, z0),
+ svst4q_vnum (p0, x0, -4, z0))
+
+/*
+** st4q_vnum_u16_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_m32, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, -32, z0),
+ svst4q_vnum (p0, x0, -32, z0))
+
+/*
+** st4q_vnum_u16_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_m36, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, -36, z0),
+ svst4q_vnum (p0, x0, -36, z0))
+
+/*
+** st4q_vnum_u16_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st4q {z0\.q - z3\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st4q {z0\.q - z3\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st4q_vnum_u16_x1, svuint16x4_t, uint16_t,
+ svst4q_vnum_u16 (p0, x0, x1, z0),
+ svst4q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u32.c
new file mode 100644
index 0000000..66ae2ff
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u32.c
@@ -0,0 +1,315 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st4q_u32_base:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u32_base, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0, z0),
+ svst4q (p0, x0, z0))
+
+/*
+** st4q_u32_index:
+** add (x[0-9]), x0, x1, lsl #?2
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_u32_index, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 + x1, z0),
+ svst4q (p0, x0 + x1, z0))
+
+/*
+** st4q_u32_index2:
+** add (x[0-9]), x0, x1, lsl #?3
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_u32_index2, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 + x1 * 2, z0),
+ svst4q (p0, x0 + x1 * 2, z0))
+
+/*
+** st4q_u32_index4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st4q_u32_index4, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 + x1 * 4, z0),
+ svst4q (p0, x0 + x1 * 4, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u32_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u32_1, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 + svcntw (), z0),
+ svst4q (p0, x0 + svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u32_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u32_2, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 + svcntw () * 2, z0),
+ svst4q (p0, x0 + svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u32_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u32_3, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 + svcntw () * 3, z0),
+ svst4q (p0, x0 + svcntw () * 3, z0))
+
+/*
+** st4q_u32_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u32_4, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 + svcntw () * 4, z0),
+ svst4q (p0, x0 + svcntw () * 4, z0))
+
+/*
+** st4q_u32_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u32_28, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 + svcntw () * 28, z0),
+ svst4q (p0, x0 + svcntw () * 28, z0))
+
+/*
+** st4q_u32_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_u32_32, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 + svcntw () * 32, z0),
+ svst4q (p0, x0 + svcntw () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u32_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u32_m1, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 - svcntw (), z0),
+ svst4q (p0, x0 - svcntw (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u32_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u32_m2, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 - svcntw () * 2, z0),
+ svst4q (p0, x0 - svcntw () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u32_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u32_m3, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 - svcntw () * 3, z0),
+ svst4q (p0, x0 - svcntw () * 3, z0))
+
+/*
+** st4q_u32_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u32_m4, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 - svcntw () * 4, z0),
+ svst4q (p0, x0 - svcntw () * 4, z0))
+
+/*
+** st4q_u32_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u32_m32, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 - svcntw () * 32, z0),
+ svst4q (p0, x0 - svcntw () * 32, z0))
+
+/*
+** st4q_u32_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_u32_m36, svuint32x4_t, uint32_t,
+ svst4q_u32 (p0, x0 - svcntw () * 36, z0),
+ svst4q (p0, x0 - svcntw () * 36, z0))
+
+/*
+** st4q_vnum_u32_0:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_0, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, 0, z0),
+ svst4q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u32_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_1, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, 1, z0),
+ svst4q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u32_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_2, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, 2, z0),
+ svst4q_vnum (p0, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u32_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_3, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, 3, z0),
+ svst4q_vnum (p0, x0, 3, z0))
+
+/*
+** st4q_vnum_u32_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_4, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, 4, z0),
+ svst4q_vnum (p0, x0, 4, z0))
+
+/*
+** st4q_vnum_u32_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_28, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, 28, z0),
+ svst4q_vnum (p0, x0, 28, z0))
+
+/*
+** st4q_vnum_u32_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_32, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, 32, z0),
+ svst4q_vnum (p0, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u32_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_m1, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, -1, z0),
+ svst4q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u32_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_m2, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, -2, z0),
+ svst4q_vnum (p0, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u32_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_m3, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, -3, z0),
+ svst4q_vnum (p0, x0, -3, z0))
+
+/*
+** st4q_vnum_u32_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_m4, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, -4, z0),
+ svst4q_vnum (p0, x0, -4, z0))
+
+/*
+** st4q_vnum_u32_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_m32, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, -32, z0),
+ svst4q_vnum (p0, x0, -32, z0))
+
+/*
+** st4q_vnum_u32_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_m36, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, -36, z0),
+ svst4q_vnum (p0, x0, -36, z0))
+
+/*
+** st4q_vnum_u32_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st4q {z0\.q - z3\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st4q {z0\.q - z3\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st4q_vnum_u32_x1, svuint32x4_t, uint32_t,
+ svst4q_vnum_u32 (p0, x0, x1, z0),
+ svst4q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u64.c
new file mode 100644
index 0000000..13f3a99
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u64.c
@@ -0,0 +1,305 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st4q_u64_base:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u64_base, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0, z0),
+ svst4q (p0, x0, z0))
+
+/*
+** st4q_u64_index:
+** add (x[0-9]), x0, x1, lsl #?3
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_u64_index, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 + x1, z0),
+ svst4q (p0, x0 + x1, z0))
+
+/*
+** st4q_u64_index2:
+** st4q {z0\.q - z3\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st4q_u64_index2, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 + x1 * 2, z0),
+ svst4q (p0, x0 + x1 * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u64_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u64_1, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 + svcntd (), z0),
+ svst4q (p0, x0 + svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u64_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u64_2, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 + svcntd () * 2, z0),
+ svst4q (p0, x0 + svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u64_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u64_3, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 + svcntd () * 3, z0),
+ svst4q (p0, x0 + svcntd () * 3, z0))
+
+/*
+** st4q_u64_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u64_4, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 + svcntd () * 4, z0),
+ svst4q (p0, x0 + svcntd () * 4, z0))
+
+/*
+** st4q_u64_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u64_28, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 + svcntd () * 28, z0),
+ svst4q (p0, x0 + svcntd () * 28, z0))
+
+/*
+** st4q_u64_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_u64_32, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 + svcntd () * 32, z0),
+ svst4q (p0, x0 + svcntd () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u64_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u64_m1, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 - svcntd (), z0),
+ svst4q (p0, x0 - svcntd (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u64_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u64_m2, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 - svcntd () * 2, z0),
+ svst4q (p0, x0 - svcntd () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u64_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u64_m3, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 - svcntd () * 3, z0),
+ svst4q (p0, x0 - svcntd () * 3, z0))
+
+/*
+** st4q_u64_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u64_m4, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 - svcntd () * 4, z0),
+ svst4q (p0, x0 - svcntd () * 4, z0))
+
+/*
+** st4q_u64_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u64_m32, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 - svcntd () * 32, z0),
+ svst4q (p0, x0 - svcntd () * 32, z0))
+
+/*
+** st4q_u64_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_u64_m36, svuint64x4_t, uint64_t,
+ svst4q_u64 (p0, x0 - svcntd () * 36, z0),
+ svst4q (p0, x0 - svcntd () * 36, z0))
+
+/*
+** st4q_vnum_u64_0:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_0, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, 0, z0),
+ svst4q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u64_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_1, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, 1, z0),
+ svst4q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u64_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_2, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, 2, z0),
+ svst4q_vnum (p0, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u64_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_3, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, 3, z0),
+ svst4q_vnum (p0, x0, 3, z0))
+
+/*
+** st4q_vnum_u64_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_4, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, 4, z0),
+ svst4q_vnum (p0, x0, 4, z0))
+
+/*
+** st4q_vnum_u64_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_28, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, 28, z0),
+ svst4q_vnum (p0, x0, 28, z0))
+
+/*
+** st4q_vnum_u64_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_32, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, 32, z0),
+ svst4q_vnum (p0, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u64_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_m1, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, -1, z0),
+ svst4q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u64_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_m2, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, -2, z0),
+ svst4q_vnum (p0, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u64_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_m3, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, -3, z0),
+ svst4q_vnum (p0, x0, -3, z0))
+
+/*
+** st4q_vnum_u64_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_m4, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, -4, z0),
+ svst4q_vnum (p0, x0, -4, z0))
+
+/*
+** st4q_vnum_u64_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_m32, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, -32, z0),
+ svst4q_vnum (p0, x0, -32, z0))
+
+/*
+** st4q_vnum_u64_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_m36, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, -36, z0),
+ svst4q_vnum (p0, x0, -36, z0))
+
+/*
+** st4q_vnum_u64_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st4q {z0\.q - z3\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st4q {z0\.q - z3\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st4q_vnum_u64_x1, svuint64x4_t, uint64_t,
+ svst4q_vnum_u64 (p0, x0, x1, z0),
+ svst4q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u8.c
new file mode 100644
index 0000000..318e25b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/st4q_u8.c
@@ -0,0 +1,335 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** st4q_u8_base:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u8_base, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0, z0),
+ svst4q (p0, x0, z0))
+
+/*
+** st4q_u8_index:
+** add (x[0-9]), (?:x0, x1|x1, x0)
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_u8_index, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 + x1, z0),
+ svst4q (p0, x0 + x1, z0))
+
+/*
+** st4q_u8_index2:
+** add (x[0-9]), x0, x1, lsl #?1
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_u8_index2, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 + x1 * 2, z0),
+ svst4q (p0, x0 + x1 * 2, z0))
+
+/*
+** st4q_u8_index4:
+** add (x[0-9]), x0, x1, lsl #?2
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_u8_index4, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 + x1 * 4, z0),
+ svst4q (p0, x0 + x1 * 4, z0))
+
+/*
+** st4q_u8_index8:
+** add (x[0-9]), x0, x1, lsl #?3
+** st4q {z0\.q - z3\.q}, p0, \[\1\]
+** ret
+*/
+TEST_STORE (st4q_u8_index8, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 + x1 * 8, z0),
+ svst4q (p0, x0 + x1 * 8, z0))
+
+/*
+** st4q_u8_index16:
+** st4q {z0\.q - z3\.q}, p0, \[x0, x1, lsl #?4\]
+** ret
+*/
+TEST_STORE (st4q_u8_index16, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 + x1 * 16, z0),
+ svst4q (p0, x0 + x1 * 16, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u8_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u8_1, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 + svcntb (), z0),
+ svst4q (p0, x0 + svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u8_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u8_2, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 + svcntb () * 2, z0),
+ svst4q (p0, x0 + svcntb () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u8_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u8_3, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 + svcntb () * 3, z0),
+ svst4q (p0, x0 + svcntb () * 3, z0))
+
+/*
+** st4q_u8_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u8_4, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 + svcntb () * 4, z0),
+ svst4q (p0, x0 + svcntb () * 4, z0))
+
+/*
+** st4q_u8_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u8_28, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 + svcntb () * 28, z0),
+ svst4q (p0, x0 + svcntb () * 28, z0))
+
+/*
+** st4q_u8_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_u8_32, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 + svcntb () * 32, z0),
+ svst4q (p0, x0 + svcntb () * 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u8_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u8_m1, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 - svcntb (), z0),
+ svst4q (p0, x0 - svcntb (), z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u8_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u8_m2, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 - svcntb () * 2, z0),
+ svst4q (p0, x0 - svcntb () * 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_u8_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_u8_m3, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 - svcntb () * 3, z0),
+ svst4q (p0, x0 - svcntb () * 3, z0))
+
+/*
+** st4q_u8_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u8_m4, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 - svcntb () * 4, z0),
+ svst4q (p0, x0 - svcntb () * 4, z0))
+
+/*
+** st4q_u8_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_u8_m32, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 - svcntb () * 32, z0),
+ svst4q (p0, x0 - svcntb () * 32, z0))
+
+/*
+** st4q_u8_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_u8_m36, svuint8x4_t, uint8_t,
+ svst4q_u8 (p0, x0 - svcntb () * 36, z0),
+ svst4q (p0, x0 - svcntb () * 36, z0))
+
+/*
+** st4q_vnum_u8_0:
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_0, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, 0, z0),
+ svst4q_vnum (p0, x0, 0, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u8_1:
+** incb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_1, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, 1, z0),
+ svst4q_vnum (p0, x0, 1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u8_2:
+** incb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_2, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, 2, z0),
+ svst4q_vnum (p0, x0, 2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u8_3:
+** incb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_3, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, 3, z0),
+ svst4q_vnum (p0, x0, 3, z0))
+
+/*
+** st4q_vnum_u8_4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_4, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, 4, z0),
+ svst4q_vnum (p0, x0, 4, z0))
+
+/*
+** st4q_vnum_u8_28:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #28, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_28, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, 28, z0),
+ svst4q_vnum (p0, x0, 28, z0))
+
+/*
+** st4q_vnum_u8_32:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_32, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, 32, z0),
+ svst4q_vnum (p0, x0, 32, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u8_m1:
+** decb x0
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_m1, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, -1, z0),
+ svst4q_vnum (p0, x0, -1, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u8_m2:
+** decb x0, all, mul #2
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_m2, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, -2, z0),
+ svst4q_vnum (p0, x0, -2, z0))
+
+/* Moving the constant into a register would also be OK. */
+/*
+** st4q_vnum_u8_m3:
+** decb x0, all, mul #3
+** st4q {z0\.q - z3\.q}, p0, \[x0\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_m3, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, -3, z0),
+ svst4q_vnum (p0, x0, -3, z0))
+
+/*
+** st4q_vnum_u8_m4:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-4, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_m4, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, -4, z0),
+ svst4q_vnum (p0, x0, -4, z0))
+
+/*
+** st4q_vnum_u8_m32:
+** st4q {z0\.q - z3\.q}, p0, \[x0, #-32, mul vl\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_m32, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, -32, z0),
+ svst4q_vnum (p0, x0, -32, z0))
+
+/*
+** st4q_vnum_u8_m36:
+** [^{]*
+** st4q {z0\.q - z3\.q}, p0, \[x[0-9]+\]
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_m36, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, -36, z0),
+ svst4q_vnum (p0, x0, -36, z0))
+
+/*
+** st4q_vnum_u8_x1:
+** cntb (x[0-9]+)
+** (
+** madd (x[0-9]+), (?:x1, \1|\1, x1), x0
+** st4q {z0\.q - z3\.q}, p0, \[\2\]
+** |
+** mul (x[0-9]+), (?:x1, \1|\1, x1)
+** st4q {z0\.q - z3\.q}, p0, \[x0, \3\]
+** )
+** ret
+*/
+TEST_STORE (st4q_vnum_u8_x1, svuint8x4_t, uint8_t,
+ svst4q_vnum_u8 (p0, x0, x1, z0),
+ svst4q_vnum (p0, x0, x1, z0))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_bf16.c
new file mode 100644
index 0000000..25e433a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_bf16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tblq_bf16_tied1:
+** tblq z0\.h, {z0\.h}, z4\.h
+** ret
+*/
+TEST_DUAL_Z (tblq_bf16_tied1, svbfloat16_t, svuint16_t,
+ z0 = svtblq_bf16 (z0, z4),
+ z0 = svtblq (z0, z4))
+
+/*
+** tblq_bf16_tied2:
+** tblq z0\.h, {z4\.h}, z0\.h
+** ret
+*/
+TEST_DUAL_Z_REV (tblq_bf16_tied2, svbfloat16_t, svuint16_t,
+ z0_res = svtblq_bf16 (z4, z0),
+ z0_res = svtblq (z4, z0))
+
+/*
+** tblq_bf16_untied:
+** tblq z0\.h, {z1\.h}, z4\.h
+** ret
+*/
+TEST_DUAL_Z (tblq_bf16_untied, svbfloat16_t, svuint16_t,
+ z0 = svtblq_bf16 (z1, z4),
+ z0 = svtblq (z1, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_f16.c
new file mode 100644
index 0000000..623d324
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_f16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tblq_f16_tied1:
+** tblq z0\.h, {z0\.h}, z4\.h
+** ret
+*/
+TEST_DUAL_Z (tblq_f16_tied1, svfloat16_t, svuint16_t,
+ z0 = svtblq_f16 (z0, z4),
+ z0 = svtblq (z0, z4))
+
+/*
+** tblq_f16_tied2:
+** tblq z0\.h, {z4\.h}, z0\.h
+** ret
+*/
+TEST_DUAL_Z_REV (tblq_f16_tied2, svfloat16_t, svuint16_t,
+ z0_res = svtblq_f16 (z4, z0),
+ z0_res = svtblq (z4, z0))
+
+/*
+** tblq_f16_untied:
+** tblq z0\.h, {z1\.h}, z4\.h
+** ret
+*/
+TEST_DUAL_Z (tblq_f16_untied, svfloat16_t, svuint16_t,
+ z0 = svtblq_f16 (z1, z4),
+ z0 = svtblq (z1, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_f32.c
new file mode 100644
index 0000000..3c42fac
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_f32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tblq_f32_tied1:
+** tblq z0\.s, {z0\.s}, z4\.s
+** ret
+*/
+TEST_DUAL_Z (tblq_f32_tied1, svfloat32_t, svuint32_t,
+ z0 = svtblq_f32 (z0, z4),
+ z0 = svtblq (z0, z4))
+
+/*
+** tblq_f32_tied2:
+** tblq z0\.s, {z4\.s}, z0\.s
+** ret
+*/
+TEST_DUAL_Z_REV (tblq_f32_tied2, svfloat32_t, svuint32_t,
+ z0_res = svtblq_f32 (z4, z0),
+ z0_res = svtblq (z4, z0))
+
+/*
+** tblq_f32_untied:
+** tblq z0\.s, {z1\.s}, z4\.s
+** ret
+*/
+TEST_DUAL_Z (tblq_f32_untied, svfloat32_t, svuint32_t,
+ z0 = svtblq_f32 (z1, z4),
+ z0 = svtblq (z1, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_f64.c
new file mode 100644
index 0000000..3293eae
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_f64.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tblq_f64_tied1:
+** tblq z0\.d, {z0\.d}, z4\.d
+** ret
+*/
+TEST_DUAL_Z (tblq_f64_tied1, svfloat64_t, svuint64_t,
+ z0 = svtblq_f64 (z0, z4),
+ z0 = svtblq (z0, z4))
+
+/*
+** tblq_f64_tied2:
+** tblq z0\.d, {z4\.d}, z0\.d
+** ret
+*/
+TEST_DUAL_Z_REV (tblq_f64_tied2, svfloat64_t, svuint64_t,
+ z0_res = svtblq_f64 (z4, z0),
+ z0_res = svtblq (z4, z0))
+
+/*
+** tblq_f64_untied:
+** tblq z0\.d, {z1\.d}, z4\.d
+** ret
+*/
+TEST_DUAL_Z (tblq_f64_untied, svfloat64_t, svuint64_t,
+ z0 = svtblq_f64 (z1, z4),
+ z0 = svtblq (z1, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s16.c
new file mode 100644
index 0000000..d627cdc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tblq_s16_tied1:
+** tblq z0\.h, {z0\.h}, z4\.h
+** ret
+*/
+TEST_DUAL_Z (tblq_s16_tied1, svint16_t, svuint16_t,
+ z0 = svtblq_s16 (z0, z4),
+ z0 = svtblq (z0, z4))
+
+/*
+** tblq_s16_tied2:
+** tblq z0\.h, {z4\.h}, z0\.h
+** ret
+*/
+TEST_DUAL_Z_REV (tblq_s16_tied2, svint16_t, svuint16_t,
+ z0_res = svtblq_s16 (z4, z0),
+ z0_res = svtblq (z4, z0))
+
+/*
+** tblq_s16_untied:
+** tblq z0\.h, {z1\.h}, z4\.h
+** ret
+*/
+TEST_DUAL_Z (tblq_s16_untied, svint16_t, svuint16_t,
+ z0 = svtblq_s16 (z1, z4),
+ z0 = svtblq (z1, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s32.c
new file mode 100644
index 0000000..d60f43b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tblq_s32_tied1:
+** tblq z0\.s, {z0\.s}, z4\.s
+** ret
+*/
+TEST_DUAL_Z (tblq_s32_tied1, svint32_t, svuint32_t,
+ z0 = svtblq_s32 (z0, z4),
+ z0 = svtblq (z0, z4))
+
+/*
+** tblq_s32_tied2:
+** tblq z0\.s, {z4\.s}, z0\.s
+** ret
+*/
+TEST_DUAL_Z_REV (tblq_s32_tied2, svint32_t, svuint32_t,
+ z0_res = svtblq_s32 (z4, z0),
+ z0_res = svtblq (z4, z0))
+
+/*
+** tblq_s32_untied:
+** tblq z0\.s, {z1\.s}, z4\.s
+** ret
+*/
+TEST_DUAL_Z (tblq_s32_untied, svint32_t, svuint32_t,
+ z0 = svtblq_s32 (z1, z4),
+ z0 = svtblq (z1, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s64.c
new file mode 100644
index 0000000..71e087c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s64.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tblq_s64_tied1:
+** tblq z0\.d, {z0\.d}, z4\.d
+** ret
+*/
+TEST_DUAL_Z (tblq_s64_tied1, svint64_t, svuint64_t,
+ z0 = svtblq_s64 (z0, z4),
+ z0 = svtblq (z0, z4))
+
+/*
+** tblq_s64_tied2:
+** tblq z0\.d, {z4\.d}, z0\.d
+** ret
+*/
+TEST_DUAL_Z_REV (tblq_s64_tied2, svint64_t, svuint64_t,
+ z0_res = svtblq_s64 (z4, z0),
+ z0_res = svtblq (z4, z0))
+
+/*
+** tblq_s64_untied:
+** tblq z0\.d, {z1\.d}, z4\.d
+** ret
+*/
+TEST_DUAL_Z (tblq_s64_untied, svint64_t, svuint64_t,
+ z0 = svtblq_s64 (z1, z4),
+ z0 = svtblq (z1, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s8.c
new file mode 100644
index 0000000..7e0bac2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_s8.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tblq_s8_tied1:
+** tblq z0\.b, {z0\.b}, z4\.b
+** ret
+*/
+TEST_DUAL_Z (tblq_s8_tied1, svint8_t, svuint8_t,
+ z0 = svtblq_s8 (z0, z4),
+ z0 = svtblq (z0, z4))
+
+/*
+** tblq_s8_tied2:
+** tblq z0\.b, {z4\.b}, z0\.b
+** ret
+*/
+TEST_DUAL_Z_REV (tblq_s8_tied2, svint8_t, svuint8_t,
+ z0_res = svtblq_s8 (z4, z0),
+ z0_res = svtblq (z4, z0))
+
+/*
+** tblq_s8_untied:
+** tblq z0\.b, {z1\.b}, z4\.b
+** ret
+*/
+TEST_DUAL_Z (tblq_s8_untied, svint8_t, svuint8_t,
+ z0 = svtblq_s8 (z1, z4),
+ z0 = svtblq (z1, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u16.c
new file mode 100644
index 0000000..8471d73
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tblq_u16_tied1:
+** tblq z0\.h, {z0\.h}, z4\.h
+** ret
+*/
+TEST_DUAL_Z (tblq_u16_tied1, svuint16_t, svuint16_t,
+ z0 = svtblq_u16 (z0, z4),
+ z0 = svtblq (z0, z4))
+
+/*
+** tblq_u16_tied2:
+** tblq z0\.h, {z4\.h}, z0\.h
+** ret
+*/
+TEST_DUAL_Z_REV (tblq_u16_tied2, svuint16_t, svuint16_t,
+ z0_res = svtblq_u16 (z4, z0),
+ z0_res = svtblq (z4, z0))
+
+/*
+** tblq_u16_untied:
+** tblq z0\.h, {z1\.h}, z4\.h
+** ret
+*/
+TEST_DUAL_Z (tblq_u16_untied, svuint16_t, svuint16_t,
+ z0 = svtblq_u16 (z1, z4),
+ z0 = svtblq (z1, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u32.c
new file mode 100644
index 0000000..695c71d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tblq_u32_tied1:
+** tblq z0\.s, {z0\.s}, z4\.s
+** ret
+*/
+TEST_DUAL_Z (tblq_u32_tied1, svuint32_t, svuint32_t,
+ z0 = svtblq_u32 (z0, z4),
+ z0 = svtblq (z0, z4))
+
+/*
+** tblq_u32_tied2:
+** tblq z0\.s, {z4\.s}, z0\.s
+** ret
+*/
+TEST_DUAL_Z_REV (tblq_u32_tied2, svuint32_t, svuint32_t,
+ z0_res = svtblq_u32 (z4, z0),
+ z0_res = svtblq (z4, z0))
+
+/*
+** tblq_u32_untied:
+** tblq z0\.s, {z1\.s}, z4\.s
+** ret
+*/
+TEST_DUAL_Z (tblq_u32_untied, svuint32_t, svuint32_t,
+ z0 = svtblq_u32 (z1, z4),
+ z0 = svtblq (z1, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u64.c
new file mode 100644
index 0000000..8d22dcdf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u64.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tblq_u64_tied1:
+** tblq z0\.d, {z0\.d}, z4\.d
+** ret
+*/
+TEST_DUAL_Z (tblq_u64_tied1, svuint64_t, svuint64_t,
+ z0 = svtblq_u64 (z0, z4),
+ z0 = svtblq (z0, z4))
+
+/*
+** tblq_u64_tied2:
+** tblq z0\.d, {z4\.d}, z0\.d
+** ret
+*/
+TEST_DUAL_Z_REV (tblq_u64_tied2, svuint64_t, svuint64_t,
+ z0_res = svtblq_u64 (z4, z0),
+ z0_res = svtblq (z4, z0))
+
+/*
+** tblq_u64_untied:
+** tblq z0\.d, {z1\.d}, z4\.d
+** ret
+*/
+TEST_DUAL_Z (tblq_u64_untied, svuint64_t, svuint64_t,
+ z0 = svtblq_u64 (z1, z4),
+ z0 = svtblq (z1, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u8.c
new file mode 100644
index 0000000..098b489
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tblq_u8.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tblq_u8_tied1:
+** tblq z0\.b, {z0\.b}, z4\.b
+** ret
+*/
+TEST_DUAL_Z (tblq_u8_tied1, svuint8_t, svuint8_t,
+ z0 = svtblq_u8 (z0, z4),
+ z0 = svtblq (z0, z4))
+
+/*
+** tblq_u8_tied2:
+** tblq z0\.b, {z4\.b}, z0\.b
+** ret
+*/
+TEST_DUAL_Z_REV (tblq_u8_tied2, svuint8_t, svuint8_t,
+ z0_res = svtblq_u8 (z4, z0),
+ z0_res = svtblq (z4, z0))
+
+/*
+** tblq_u8_untied:
+** tblq z0\.b, {z1\.b}, z4\.b
+** ret
+*/
+TEST_DUAL_Z (tblq_u8_untied, svuint8_t, svuint8_t,
+ z0 = svtblq_u8 (z1, z4),
+ z0 = svtblq (z1, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_bf16.c
new file mode 100644
index 0000000..9d9acdf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_bf16.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tbxq_bf16_tied1:
+** tbxq z0\.h, z1\.h, z4\.h
+** ret
+*/
+TEST_DUAL_Z (tbxq_bf16_tied1, svbfloat16_t, svuint16_t,
+ z0 = svtbxq_bf16 (z0, z1, z4),
+ z0 = svtbxq (z0, z1, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z (tbxq_bf16_tied2, svbfloat16_t, svuint16_t,
+ z0 = svtbxq_bf16 (z1, z0, z4),
+ z0 = svtbxq (z1, z0, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z_REV (tbxq_bf16_tied3, svbfloat16_t, svuint16_t,
+ z0_res = svtbxq_bf16 (z4, z5, z0),
+ z0_res = svtbxq (z4, z5, z0))
+
+/*
+** tbxq_bf16_untied:
+** (
+** mov z0\.d, z1\.d
+** tbxq z0\.h, z2\.h, z4\.h
+** |
+** tbxq z1\.h, z2\.h, z4\.h
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_DUAL_Z (tbxq_bf16_untied, svbfloat16_t, svuint16_t,
+ z0 = svtbxq_bf16 (z1, z2, z4),
+ z0 = svtbxq (z1, z2, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_f16.c
new file mode 100644
index 0000000..5dadf63
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_f16.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tbxq_f16_tied1:
+** tbxq z0\.h, z1\.h, z4\.h
+** ret
+*/
+TEST_DUAL_Z (tbxq_f16_tied1, svfloat16_t, svuint16_t,
+ z0 = svtbxq_f16 (z0, z1, z4),
+ z0 = svtbxq (z0, z1, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z (tbxq_f16_tied2, svfloat16_t, svuint16_t,
+ z0 = svtbxq_f16 (z1, z0, z4),
+ z0 = svtbxq (z1, z0, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z_REV (tbxq_f16_tied3, svfloat16_t, svuint16_t,
+ z0_res = svtbxq_f16 (z4, z5, z0),
+ z0_res = svtbxq (z4, z5, z0))
+
+/*
+** tbxq_f16_untied:
+** (
+** mov z0\.d, z1\.d
+** tbxq z0\.h, z2\.h, z4\.h
+** |
+** tbxq z1\.h, z2\.h, z4\.h
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_DUAL_Z (tbxq_f16_untied, svfloat16_t, svuint16_t,
+ z0 = svtbxq_f16 (z1, z2, z4),
+ z0 = svtbxq (z1, z2, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_f32.c
new file mode 100644
index 0000000..2d987f13a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_f32.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tbxq_f32_tied1:
+** tbxq z0\.s, z1\.s, z4\.s
+** ret
+*/
+TEST_DUAL_Z (tbxq_f32_tied1, svfloat32_t, svuint32_t,
+ z0 = svtbxq_f32 (z0, z1, z4),
+ z0 = svtbxq (z0, z1, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z (tbxq_f32_tied2, svfloat32_t, svuint32_t,
+ z0 = svtbxq_f32 (z1, z0, z4),
+ z0 = svtbxq (z1, z0, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z_REV (tbxq_f32_tied3, svfloat32_t, svuint32_t,
+ z0_res = svtbxq_f32 (z4, z5, z0),
+ z0_res = svtbxq (z4, z5, z0))
+
+/*
+** tbxq_f32_untied:
+** (
+** mov z0\.d, z1\.d
+** tbxq z0\.s, z2\.s, z4\.s
+** |
+** tbxq z1\.s, z2\.s, z4\.s
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_DUAL_Z (tbxq_f32_untied, svfloat32_t, svuint32_t,
+ z0 = svtbxq_f32 (z1, z2, z4),
+ z0 = svtbxq (z1, z2, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_f64.c
new file mode 100644
index 0000000..d9e98d3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_f64.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tbxq_f64_tied1:
+** tbxq z0\.d, z1\.d, z4\.d
+** ret
+*/
+TEST_DUAL_Z (tbxq_f64_tied1, svfloat64_t, svuint64_t,
+ z0 = svtbxq_f64 (z0, z1, z4),
+ z0 = svtbxq (z0, z1, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z (tbxq_f64_tied2, svfloat64_t, svuint64_t,
+ z0 = svtbxq_f64 (z1, z0, z4),
+ z0 = svtbxq (z1, z0, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z_REV (tbxq_f64_tied3, svfloat64_t, svuint64_t,
+ z0_res = svtbxq_f64 (z4, z5, z0),
+ z0_res = svtbxq (z4, z5, z0))
+
+/*
+** tbxq_f64_untied:
+** (
+** mov z0\.d, z1\.d
+** tbxq z0\.d, z2\.d, z4\.d
+** |
+** tbxq z1\.d, z2\.d, z4\.d
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_DUAL_Z (tbxq_f64_untied, svfloat64_t, svuint64_t,
+ z0 = svtbxq_f64 (z1, z2, z4),
+ z0 = svtbxq (z1, z2, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s16.c
new file mode 100644
index 0000000..a0468cb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s16.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tbxq_s16_tied1:
+** tbxq z0\.h, z1\.h, z4\.h
+** ret
+*/
+TEST_DUAL_Z (tbxq_s16_tied1, svint16_t, svuint16_t,
+ z0 = svtbxq_s16 (z0, z1, z4),
+ z0 = svtbxq (z0, z1, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z (tbxq_s16_tied2, svint16_t, svuint16_t,
+ z0 = svtbxq_s16 (z1, z0, z4),
+ z0 = svtbxq (z1, z0, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z_REV (tbxq_s16_tied3, svint16_t, svuint16_t,
+ z0_res = svtbxq_s16 (z4, z5, z0),
+ z0_res = svtbxq (z4, z5, z0))
+
+/*
+** tbxq_s16_untied:
+** (
+** mov z0\.d, z1\.d
+** tbxq z0\.h, z2\.h, z4\.h
+** |
+** tbxq z1\.h, z2\.h, z4\.h
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_DUAL_Z (tbxq_s16_untied, svint16_t, svuint16_t,
+ z0 = svtbxq_s16 (z1, z2, z4),
+ z0 = svtbxq (z1, z2, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s32.c
new file mode 100644
index 0000000..89a11c2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s32.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tbxq_s32_tied1:
+** tbxq z0\.s, z1\.s, z4\.s
+** ret
+*/
+TEST_DUAL_Z (tbxq_s32_tied1, svint32_t, svuint32_t,
+ z0 = svtbxq_s32 (z0, z1, z4),
+ z0 = svtbxq (z0, z1, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z (tbxq_s32_tied2, svint32_t, svuint32_t,
+ z0 = svtbxq_s32 (z1, z0, z4),
+ z0 = svtbxq (z1, z0, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z_REV (tbxq_s32_tied3, svint32_t, svuint32_t,
+ z0_res = svtbxq_s32 (z4, z5, z0),
+ z0_res = svtbxq (z4, z5, z0))
+
+/*
+** tbxq_s32_untied:
+** (
+** mov z0\.d, z1\.d
+** tbxq z0\.s, z2\.s, z4\.s
+** |
+** tbxq z1\.s, z2\.s, z4\.s
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_DUAL_Z (tbxq_s32_untied, svint32_t, svuint32_t,
+ z0 = svtbxq_s32 (z1, z2, z4),
+ z0 = svtbxq (z1, z2, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s64.c
new file mode 100644
index 0000000..5cecfe0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s64.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tbxq_s64_tied1:
+** tbxq z0\.d, z1\.d, z4\.d
+** ret
+*/
+TEST_DUAL_Z (tbxq_s64_tied1, svint64_t, svuint64_t,
+ z0 = svtbxq_s64 (z0, z1, z4),
+ z0 = svtbxq (z0, z1, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z (tbxq_s64_tied2, svint64_t, svuint64_t,
+ z0 = svtbxq_s64 (z1, z0, z4),
+ z0 = svtbxq (z1, z0, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z_REV (tbxq_s64_tied3, svint64_t, svuint64_t,
+ z0_res = svtbxq_s64 (z4, z5, z0),
+ z0_res = svtbxq (z4, z5, z0))
+
+/*
+** tbxq_s64_untied:
+** (
+** mov z0\.d, z1\.d
+** tbxq z0\.d, z2\.d, z4\.d
+** |
+** tbxq z1\.d, z2\.d, z4\.d
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_DUAL_Z (tbxq_s64_untied, svint64_t, svuint64_t,
+ z0 = svtbxq_s64 (z1, z2, z4),
+ z0 = svtbxq (z1, z2, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s8.c
new file mode 100644
index 0000000..2324fe1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_s8.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tbxq_s8_tied1:
+** tbxq z0\.b, z1\.b, z4\.b
+** ret
+*/
+TEST_DUAL_Z (tbxq_s8_tied1, svint8_t, svuint8_t,
+ z0 = svtbxq_s8 (z0, z1, z4),
+ z0 = svtbxq (z0, z1, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z (tbxq_s8_tied2, svint8_t, svuint8_t,
+ z0 = svtbxq_s8 (z1, z0, z4),
+ z0 = svtbxq (z1, z0, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z_REV (tbxq_s8_tied3, svint8_t, svuint8_t,
+ z0_res = svtbxq_s8 (z4, z5, z0),
+ z0_res = svtbxq (z4, z5, z0))
+
+/*
+** tbxq_s8_untied:
+** (
+** mov z0\.d, z1\.d
+** tbxq z0\.b, z2\.b, z4\.b
+** |
+** tbxq z1\.b, z2\.b, z4\.b
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_DUAL_Z (tbxq_s8_untied, svint8_t, svuint8_t,
+ z0 = svtbxq_s8 (z1, z2, z4),
+ z0 = svtbxq (z1, z2, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u16.c
new file mode 100644
index 0000000..d59a8d1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u16.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tbxq_u16_tied1:
+** tbxq z0\.h, z1\.h, z4\.h
+** ret
+*/
+TEST_DUAL_Z (tbxq_u16_tied1, svuint16_t, svuint16_t,
+ z0 = svtbxq_u16 (z0, z1, z4),
+ z0 = svtbxq (z0, z1, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z (tbxq_u16_tied2, svuint16_t, svuint16_t,
+ z0 = svtbxq_u16 (z1, z0, z4),
+ z0 = svtbxq (z1, z0, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z_REV (tbxq_u16_tied3, svuint16_t, svuint16_t,
+ z0_res = svtbxq_u16 (z4, z5, z0),
+ z0_res = svtbxq (z4, z5, z0))
+
+/*
+** tbxq_u16_untied:
+** (
+** mov z0\.d, z1\.d
+** tbxq z0\.h, z2\.h, z4\.h
+** |
+** tbxq z1\.h, z2\.h, z4\.h
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_DUAL_Z (tbxq_u16_untied, svuint16_t, svuint16_t,
+ z0 = svtbxq_u16 (z1, z2, z4),
+ z0 = svtbxq (z1, z2, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u32.c
new file mode 100644
index 0000000..8100b15
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u32.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tbxq_u32_tied1:
+** tbxq z0\.s, z1\.s, z4\.s
+** ret
+*/
+TEST_DUAL_Z (tbxq_u32_tied1, svuint32_t, svuint32_t,
+ z0 = svtbxq_u32 (z0, z1, z4),
+ z0 = svtbxq (z0, z1, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z (tbxq_u32_tied2, svuint32_t, svuint32_t,
+ z0 = svtbxq_u32 (z1, z0, z4),
+ z0 = svtbxq (z1, z0, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z_REV (tbxq_u32_tied3, svuint32_t, svuint32_t,
+ z0_res = svtbxq_u32 (z4, z5, z0),
+ z0_res = svtbxq (z4, z5, z0))
+
+/*
+** tbxq_u32_untied:
+** (
+** mov z0\.d, z1\.d
+** tbxq z0\.s, z2\.s, z4\.s
+** |
+** tbxq z1\.s, z2\.s, z4\.s
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_DUAL_Z (tbxq_u32_untied, svuint32_t, svuint32_t,
+ z0 = svtbxq_u32 (z1, z2, z4),
+ z0 = svtbxq (z1, z2, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u64.c
new file mode 100644
index 0000000..37da8e1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u64.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tbxq_u64_tied1:
+** tbxq z0\.d, z1\.d, z4\.d
+** ret
+*/
+TEST_DUAL_Z (tbxq_u64_tied1, svuint64_t, svuint64_t,
+ z0 = svtbxq_u64 (z0, z1, z4),
+ z0 = svtbxq (z0, z1, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z (tbxq_u64_tied2, svuint64_t, svuint64_t,
+ z0 = svtbxq_u64 (z1, z0, z4),
+ z0 = svtbxq (z1, z0, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z_REV (tbxq_u64_tied3, svuint64_t, svuint64_t,
+ z0_res = svtbxq_u64 (z4, z5, z0),
+ z0_res = svtbxq (z4, z5, z0))
+
+/*
+** tbxq_u64_untied:
+** (
+** mov z0\.d, z1\.d
+** tbxq z0\.d, z2\.d, z4\.d
+** |
+** tbxq z1\.d, z2\.d, z4\.d
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_DUAL_Z (tbxq_u64_untied, svuint64_t, svuint64_t,
+ z0 = svtbxq_u64 (z1, z2, z4),
+ z0 = svtbxq (z1, z2, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u8.c
new file mode 100644
index 0000000..fc20c09
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/tbxq_u8.c
@@ -0,0 +1,42 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** tbxq_u8_tied1:
+** tbxq z0\.b, z1\.b, z4\.b
+** ret
+*/
+TEST_DUAL_Z (tbxq_u8_tied1, svuint8_t, svuint8_t,
+ z0 = svtbxq_u8 (z0, z1, z4),
+ z0 = svtbxq (z0, z1, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z (tbxq_u8_tied2, svuint8_t, svuint8_t,
+ z0 = svtbxq_u8 (z1, z0, z4),
+ z0 = svtbxq (z1, z0, z4))
+
+/* Bad RA choice: no preferred output sequence. */
+TEST_DUAL_Z_REV (tbxq_u8_tied3, svuint8_t, svuint8_t,
+ z0_res = svtbxq_u8 (z4, z5, z0),
+ z0_res = svtbxq (z4, z5, z0))
+
+/*
+** tbxq_u8_untied:
+** (
+** mov z0\.d, z1\.d
+** tbxq z0\.b, z2\.b, z4\.b
+** |
+** tbxq z1\.b, z2\.b, z4\.b
+** mov z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_DUAL_Z (tbxq_u8_untied, svuint8_t, svuint8_t,
+ z0 = svtbxq_u8 (z1, z2, z4),
+ z0 = svtbxq (z1, z2, z4))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_bf16.c
new file mode 100644
index 0000000..ecac744
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_bf16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq1_bf16_tied1:
+** uzpq1 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_bf16_tied1, svbfloat16_t,
+ z0 = svuzpq1_bf16 (z0, z1),
+ z0 = svuzpq1 (z0, z1))
+
+/*
+** uzpq1_bf16_tied2:
+** uzpq1 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_bf16_tied2, svbfloat16_t,
+ z0 = svuzpq1_bf16 (z1, z0),
+ z0 = svuzpq1 (z1, z0))
+
+/*
+** uzpq1_bf16_untied:
+** uzpq1 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_bf16_untied, svbfloat16_t,
+ z0 = svuzpq1_bf16 (z1, z2),
+ z0 = svuzpq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_f16.c
new file mode 100644
index 0000000..9ed5c7e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_f16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq1_f16_tied1:
+** uzpq1 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_f16_tied1, svfloat16_t,
+ z0 = svuzpq1_f16 (z0, z1),
+ z0 = svuzpq1 (z0, z1))
+
+/*
+** uzpq1_f16_tied2:
+** uzpq1 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_f16_tied2, svfloat16_t,
+ z0 = svuzpq1_f16 (z1, z0),
+ z0 = svuzpq1 (z1, z0))
+
+/*
+** uzpq1_f16_untied:
+** uzpq1 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_f16_untied, svfloat16_t,
+ z0 = svuzpq1_f16 (z1, z2),
+ z0 = svuzpq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_f32.c
new file mode 100644
index 0000000..69dc64e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_f32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq1_f32_tied1:
+** uzpq1 z0\.s, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_f32_tied1, svfloat32_t,
+ z0 = svuzpq1_f32 (z0, z1),
+ z0 = svuzpq1 (z0, z1))
+
+/*
+** uzpq1_f32_tied2:
+** uzpq1 z0\.s, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_f32_tied2, svfloat32_t,
+ z0 = svuzpq1_f32 (z1, z0),
+ z0 = svuzpq1 (z1, z0))
+
+/*
+** uzpq1_f32_untied:
+** uzpq1 z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_f32_untied, svfloat32_t,
+ z0 = svuzpq1_f32 (z1, z2),
+ z0 = svuzpq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_f64.c
new file mode 100644
index 0000000..5b1de1a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_f64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq1_f64_tied1:
+** (
+** uzpq1 z0\.d, z0\.d, z1\.d
+** |
+** trn1 z0\.d, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_f64_tied1, svfloat64_t,
+ z0 = svuzpq1_f64 (z0, z1),
+ z0 = svuzpq1 (z0, z1))
+
+/*
+** uzpq1_f64_tied2:
+** (
+** uzpq1 z0\.d, z1\.d, z0\.d
+** |
+** trn1 z0\.d, z1\.d, z0\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_f64_tied2, svfloat64_t,
+ z0 = svuzpq1_f64 (z1, z0),
+ z0 = svuzpq1 (z1, z0))
+
+/*
+** uzpq1_f64_untied:
+** (
+** uzpq1 z0\.d, z1\.d, z2\.d
+** |
+** trn1 z0\.d, z1\.d, z2\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_f64_untied, svfloat64_t,
+ z0 = svuzpq1_f64 (z1, z2),
+ z0 = svuzpq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s16.c
new file mode 100644
index 0000000..d8095d8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq1_s16_tied1:
+** uzpq1 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_s16_tied1, svint16_t,
+ z0 = svuzpq1_s16 (z0, z1),
+ z0 = svuzpq1 (z0, z1))
+
+/*
+** uzpq1_s16_tied2:
+** uzpq1 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_s16_tied2, svint16_t,
+ z0 = svuzpq1_s16 (z1, z0),
+ z0 = svuzpq1 (z1, z0))
+
+/*
+** uzpq1_s16_untied:
+** uzpq1 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_s16_untied, svint16_t,
+ z0 = svuzpq1_s16 (z1, z2),
+ z0 = svuzpq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s32.c
new file mode 100644
index 0000000..111dfb1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq1_s32_tied1:
+** uzpq1 z0\.s, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_s32_tied1, svint32_t,
+ z0 = svuzpq1_s32 (z0, z1),
+ z0 = svuzpq1 (z0, z1))
+
+/*
+** uzpq1_s32_tied2:
+** uzpq1 z0\.s, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_s32_tied2, svint32_t,
+ z0 = svuzpq1_s32 (z1, z0),
+ z0 = svuzpq1 (z1, z0))
+
+/*
+** uzpq1_s32_untied:
+** uzpq1 z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_s32_untied, svint32_t,
+ z0 = svuzpq1_s32 (z1, z2),
+ z0 = svuzpq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s64.c
new file mode 100644
index 0000000..3cad2a9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq1_s64_tied1:
+** (
+** uzpq1 z0\.d, z0\.d, z1\.d
+** |
+** trn1 z0\.d, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_s64_tied1, svint64_t,
+ z0 = svuzpq1_s64 (z0, z1),
+ z0 = svuzpq1 (z0, z1))
+
+/*
+** uzpq1_s64_tied2:
+** (
+** uzpq1 z0\.d, z1\.d, z0\.d
+** |
+** trn1 z0\.d, z1\.d, z0\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_s64_tied2, svint64_t,
+ z0 = svuzpq1_s64 (z1, z0),
+ z0 = svuzpq1 (z1, z0))
+
+/*
+** uzpq1_s64_untied:
+** (
+** uzpq1 z0\.d, z1\.d, z2\.d
+** |
+** trn1 z0\.d, z1\.d, z2\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_s64_untied, svint64_t,
+ z0 = svuzpq1_s64 (z1, z2),
+ z0 = svuzpq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s8.c
new file mode 100644
index 0000000..571e77e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_s8.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq1_s8_tied1:
+** uzpq1 z0\.b, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_s8_tied1, svint8_t,
+ z0 = svuzpq1_s8 (z0, z1),
+ z0 = svuzpq1 (z0, z1))
+
+/*
+** uzpq1_s8_tied2:
+** uzpq1 z0\.b, z1\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_s8_tied2, svint8_t,
+ z0 = svuzpq1_s8 (z1, z0),
+ z0 = svuzpq1 (z1, z0))
+
+/*
+** uzpq1_s8_untied:
+** uzpq1 z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_s8_untied, svint8_t,
+ z0 = svuzpq1_s8 (z1, z2),
+ z0 = svuzpq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u16.c
new file mode 100644
index 0000000..17567dc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq1_u16_tied1:
+** uzpq1 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_u16_tied1, svuint16_t,
+ z0 = svuzpq1_u16 (z0, z1),
+ z0 = svuzpq1 (z0, z1))
+
+/*
+** uzpq1_u16_tied2:
+** uzpq1 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_u16_tied2, svuint16_t,
+ z0 = svuzpq1_u16 (z1, z0),
+ z0 = svuzpq1 (z1, z0))
+
+/*
+** uzpq1_u16_untied:
+** uzpq1 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_u16_untied, svuint16_t,
+ z0 = svuzpq1_u16 (z1, z2),
+ z0 = svuzpq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u32.c
new file mode 100644
index 0000000..a3cac2f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq1_u32_tied1:
+** uzpq1 z0\.s, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_u32_tied1, svuint32_t,
+ z0 = svuzpq1_u32 (z0, z1),
+ z0 = svuzpq1 (z0, z1))
+
+/*
+** uzpq1_u32_tied2:
+** uzpq1 z0\.s, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_u32_tied2, svuint32_t,
+ z0 = svuzpq1_u32 (z1, z0),
+ z0 = svuzpq1 (z1, z0))
+
+/*
+** uzpq1_u32_untied:
+** uzpq1 z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_u32_untied, svuint32_t,
+ z0 = svuzpq1_u32 (z1, z2),
+ z0 = svuzpq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u64.c
new file mode 100644
index 0000000..caccec4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq1_u64_tied1:
+** (
+** uzpq1 z0\.d, z0\.d, z1\.d
+** |
+** trn1 z0\.d, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_u64_tied1, svuint64_t,
+ z0 = svuzpq1_u64 (z0, z1),
+ z0 = svuzpq1 (z0, z1))
+
+/*
+** uzpq1_u64_tied2:
+** (
+** uzpq1 z0\.d, z1\.d, z0\.d
+** |
+** trn1 z0\.d, z1\.d, z0\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_u64_tied2, svuint64_t,
+ z0 = svuzpq1_u64 (z1, z0),
+ z0 = svuzpq1 (z1, z0))
+
+/*
+** uzpq1_u64_untied:
+** (
+** uzpq1 z0\.d, z1\.d, z2\.d
+** |
+** trn1 z0\.d, z1\.d, z2\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_u64_untied, svuint64_t,
+ z0 = svuzpq1_u64 (z1, z2),
+ z0 = svuzpq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u8.c
new file mode 100644
index 0000000..428ff1e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq1_u8.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq1_u8_tied1:
+** uzpq1 z0\.b, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_u8_tied1, svuint8_t,
+ z0 = svuzpq1_u8 (z0, z1),
+ z0 = svuzpq1 (z0, z1))
+
+/*
+** uzpq1_u8_tied2:
+** uzpq1 z0\.b, z1\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_u8_tied2, svuint8_t,
+ z0 = svuzpq1_u8 (z1, z0),
+ z0 = svuzpq1 (z1, z0))
+
+/*
+** uzpq1_u8_untied:
+** uzpq1 z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (uzpq1_u8_untied, svuint8_t,
+ z0 = svuzpq1_u8 (z1, z2),
+ z0 = svuzpq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_bf16.c
new file mode 100644
index 0000000..72225d9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_bf16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq2_bf16_tied1:
+** uzpq2 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_bf16_tied1, svbfloat16_t,
+ z0 = svuzpq2_bf16 (z0, z1),
+ z0 = svuzpq2 (z0, z1))
+
+/*
+** uzpq2_bf16_tied2:
+** uzpq2 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_bf16_tied2, svbfloat16_t,
+ z0 = svuzpq2_bf16 (z1, z0),
+ z0 = svuzpq2 (z1, z0))
+
+/*
+** uzpq2_bf16_untied:
+** uzpq2 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_bf16_untied, svbfloat16_t,
+ z0 = svuzpq2_bf16 (z1, z2),
+ z0 = svuzpq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_f16.c
new file mode 100644
index 0000000..45deb51
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_f16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq2_f16_tied1:
+** uzpq2 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_f16_tied1, svfloat16_t,
+ z0 = svuzpq2_f16 (z0, z1),
+ z0 = svuzpq2 (z0, z1))
+
+/*
+** uzpq2_f16_tied2:
+** uzpq2 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_f16_tied2, svfloat16_t,
+ z0 = svuzpq2_f16 (z1, z0),
+ z0 = svuzpq2 (z1, z0))
+
+/*
+** uzpq2_f16_untied:
+** uzpq2 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_f16_untied, svfloat16_t,
+ z0 = svuzpq2_f16 (z1, z2),
+ z0 = svuzpq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_f32.c
new file mode 100644
index 0000000..2791df7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_f32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq2_f32_tied1:
+** uzpq2 z0\.s, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_f32_tied1, svfloat32_t,
+ z0 = svuzpq2_f32 (z0, z1),
+ z0 = svuzpq2 (z0, z1))
+
+/*
+** uzpq2_f32_tied2:
+** uzpq2 z0\.s, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_f32_tied2, svfloat32_t,
+ z0 = svuzpq2_f32 (z1, z0),
+ z0 = svuzpq2 (z1, z0))
+
+/*
+** uzpq2_f32_untied:
+** uzpq2 z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_f32_untied, svfloat32_t,
+ z0 = svuzpq2_f32 (z1, z2),
+ z0 = svuzpq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_f64.c
new file mode 100644
index 0000000..ac2dd50
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_f64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq2_f64_tied1:
+** (
+** uzpq2 z0\.d, z0\.d, z1\.d
+** |
+** trn2 z0\.d, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_f64_tied1, svfloat64_t,
+ z0 = svuzpq2_f64 (z0, z1),
+ z0 = svuzpq2 (z0, z1))
+
+/*
+** uzpq2_f64_tied2:
+** (
+** uzpq2 z0\.d, z1\.d, z0\.d
+** |
+** trn2 z0\.d, z1\.d, z0\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_f64_tied2, svfloat64_t,
+ z0 = svuzpq2_f64 (z1, z0),
+ z0 = svuzpq2 (z1, z0))
+
+/*
+** uzpq2_f64_untied:
+** (
+** uzpq2 z0\.d, z1\.d, z2\.d
+** |
+** trn2 z0\.d, z1\.d, z2\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_f64_untied, svfloat64_t,
+ z0 = svuzpq2_f64 (z1, z2),
+ z0 = svuzpq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s16.c
new file mode 100644
index 0000000..8034719
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq2_s16_tied1:
+** uzpq2 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_s16_tied1, svint16_t,
+ z0 = svuzpq2_s16 (z0, z1),
+ z0 = svuzpq2 (z0, z1))
+
+/*
+** uzpq2_s16_tied2:
+** uzpq2 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_s16_tied2, svint16_t,
+ z0 = svuzpq2_s16 (z1, z0),
+ z0 = svuzpq2 (z1, z0))
+
+/*
+** uzpq2_s16_untied:
+** uzpq2 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_s16_untied, svint16_t,
+ z0 = svuzpq2_s16 (z1, z2),
+ z0 = svuzpq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s32.c
new file mode 100644
index 0000000..2840e12
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq2_s32_tied1:
+** uzpq2 z0\.s, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_s32_tied1, svint32_t,
+ z0 = svuzpq2_s32 (z0, z1),
+ z0 = svuzpq2 (z0, z1))
+
+/*
+** uzpq2_s32_tied2:
+** uzpq2 z0\.s, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_s32_tied2, svint32_t,
+ z0 = svuzpq2_s32 (z1, z0),
+ z0 = svuzpq2 (z1, z0))
+
+/*
+** uzpq2_s32_untied:
+** uzpq2 z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_s32_untied, svint32_t,
+ z0 = svuzpq2_s32 (z1, z2),
+ z0 = svuzpq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s64.c
new file mode 100644
index 0000000..df93f77
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq2_s64_tied1:
+** (
+** uzpq2 z0\.d, z0\.d, z1\.d
+** |
+** trn2 z0\.d, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_s64_tied1, svint64_t,
+ z0 = svuzpq2_s64 (z0, z1),
+ z0 = svuzpq2 (z0, z1))
+
+/*
+** uzpq2_s64_tied2:
+** (
+** uzpq2 z0\.d, z1\.d, z0\.d
+** |
+** trn2 z0\.d, z1\.d, z0\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_s64_tied2, svint64_t,
+ z0 = svuzpq2_s64 (z1, z0),
+ z0 = svuzpq2 (z1, z0))
+
+/*
+** uzpq2_s64_untied:
+** (
+** uzpq2 z0\.d, z1\.d, z2\.d
+** |
+** trn2 z0\.d, z1\.d, z2\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_s64_untied, svint64_t,
+ z0 = svuzpq2_s64 (z1, z2),
+ z0 = svuzpq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s8.c
new file mode 100644
index 0000000..91f4a4a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_s8.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq2_s8_tied1:
+** uzpq2 z0\.b, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_s8_tied1, svint8_t,
+ z0 = svuzpq2_s8 (z0, z1),
+ z0 = svuzpq2 (z0, z1))
+
+/*
+** uzpq2_s8_tied2:
+** uzpq2 z0\.b, z1\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_s8_tied2, svint8_t,
+ z0 = svuzpq2_s8 (z1, z0),
+ z0 = svuzpq2 (z1, z0))
+
+/*
+** uzpq2_s8_untied:
+** uzpq2 z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_s8_untied, svint8_t,
+ z0 = svuzpq2_s8 (z1, z2),
+ z0 = svuzpq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u16.c
new file mode 100644
index 0000000..559c784
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq2_u16_tied1:
+** uzpq2 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_u16_tied1, svuint16_t,
+ z0 = svuzpq2_u16 (z0, z1),
+ z0 = svuzpq2 (z0, z1))
+
+/*
+** uzpq2_u16_tied2:
+** uzpq2 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_u16_tied2, svuint16_t,
+ z0 = svuzpq2_u16 (z1, z0),
+ z0 = svuzpq2 (z1, z0))
+
+/*
+** uzpq2_u16_untied:
+** uzpq2 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_u16_untied, svuint16_t,
+ z0 = svuzpq2_u16 (z1, z2),
+ z0 = svuzpq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u32.c
new file mode 100644
index 0000000..135df9f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq2_u32_tied1:
+** uzpq2 z0\.s, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_u32_tied1, svuint32_t,
+ z0 = svuzpq2_u32 (z0, z1),
+ z0 = svuzpq2 (z0, z1))
+
+/*
+** uzpq2_u32_tied2:
+** uzpq2 z0\.s, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_u32_tied2, svuint32_t,
+ z0 = svuzpq2_u32 (z1, z0),
+ z0 = svuzpq2 (z1, z0))
+
+/*
+** uzpq2_u32_untied:
+** uzpq2 z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_u32_untied, svuint32_t,
+ z0 = svuzpq2_u32 (z1, z2),
+ z0 = svuzpq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u64.c
new file mode 100644
index 0000000..76fd3b4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq2_u64_tied1:
+** (
+** uzpq2 z0\.d, z0\.d, z1\.d
+** |
+** trn2 z0\.d, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_u64_tied1, svuint64_t,
+ z0 = svuzpq2_u64 (z0, z1),
+ z0 = svuzpq2 (z0, z1))
+
+/*
+** uzpq2_u64_tied2:
+** (
+** uzpq2 z0\.d, z1\.d, z0\.d
+** |
+** trn2 z0\.d, z1\.d, z0\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_u64_tied2, svuint64_t,
+ z0 = svuzpq2_u64 (z1, z0),
+ z0 = svuzpq2 (z1, z0))
+
+/*
+** uzpq2_u64_untied:
+** (
+** uzpq2 z0\.d, z1\.d, z2\.d
+** |
+** trn2 z0\.d, z1\.d, z2\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_u64_untied, svuint64_t,
+ z0 = svuzpq2_u64 (z1, z2),
+ z0 = svuzpq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u8.c
new file mode 100644
index 0000000..2f0afc2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/uzpq2_u8.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** uzpq2_u8_tied1:
+** uzpq2 z0\.b, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_u8_tied1, svuint8_t,
+ z0 = svuzpq2_u8 (z0, z1),
+ z0 = svuzpq2 (z0, z1))
+
+/*
+** uzpq2_u8_tied2:
+** uzpq2 z0\.b, z1\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_u8_tied2, svuint8_t,
+ z0 = svuzpq2_u8 (z1, z0),
+ z0 = svuzpq2 (z1, z0))
+
+/*
+** uzpq2_u8_untied:
+** uzpq2 z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (uzpq2_u8_untied, svuint8_t,
+ z0 = svuzpq2_u8 (z1, z2),
+ z0 = svuzpq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_bf16.c
new file mode 100644
index 0000000..05dc8ce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_bf16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq1_bf16_tied1:
+** zipq1 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_bf16_tied1, svbfloat16_t,
+ z0 = svzipq1_bf16 (z0, z1),
+ z0 = svzipq1 (z0, z1))
+
+/*
+** zipq1_bf16_tied2:
+** zipq1 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_bf16_tied2, svbfloat16_t,
+ z0 = svzipq1_bf16 (z1, z0),
+ z0 = svzipq1 (z1, z0))
+
+/*
+** zipq1_bf16_untied:
+** zipq1 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_bf16_untied, svbfloat16_t,
+ z0 = svzipq1_bf16 (z1, z2),
+ z0 = svzipq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_f16.c
new file mode 100644
index 0000000..d271a7d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_f16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq1_f16_tied1:
+** zipq1 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_f16_tied1, svfloat16_t,
+ z0 = svzipq1_f16 (z0, z1),
+ z0 = svzipq1 (z0, z1))
+
+/*
+** zipq1_f16_tied2:
+** zipq1 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_f16_tied2, svfloat16_t,
+ z0 = svzipq1_f16 (z1, z0),
+ z0 = svzipq1 (z1, z0))
+
+/*
+** zipq1_f16_untied:
+** zipq1 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_f16_untied, svfloat16_t,
+ z0 = svzipq1_f16 (z1, z2),
+ z0 = svzipq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_f32.c
new file mode 100644
index 0000000..324d11e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_f32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq1_f32_tied1:
+** zipq1 z0\.s, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_f32_tied1, svfloat32_t,
+ z0 = svzipq1_f32 (z0, z1),
+ z0 = svzipq1 (z0, z1))
+
+/*
+** zipq1_f32_tied2:
+** zipq1 z0\.s, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_f32_tied2, svfloat32_t,
+ z0 = svzipq1_f32 (z1, z0),
+ z0 = svzipq1 (z1, z0))
+
+/*
+** zipq1_f32_untied:
+** zipq1 z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_f32_untied, svfloat32_t,
+ z0 = svzipq1_f32 (z1, z2),
+ z0 = svzipq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_f64.c
new file mode 100644
index 0000000..7a9976d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_f64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq1_f64_tied1:
+** (
+** zipq1 z0\.d, z0\.d, z1\.d
+** |
+** trn1 z0\.d, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_f64_tied1, svfloat64_t,
+ z0 = svzipq1_f64 (z0, z1),
+ z0 = svzipq1 (z0, z1))
+
+/*
+** zipq1_f64_tied2:
+** (
+** zipq1 z0\.d, z1\.d, z0\.d
+** |
+** trn1 z0\.d, z1\.d, z0\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_f64_tied2, svfloat64_t,
+ z0 = svzipq1_f64 (z1, z0),
+ z0 = svzipq1 (z1, z0))
+
+/*
+** zipq1_f64_untied:
+** (
+** zipq1 z0\.d, z1\.d, z2\.d
+** |
+** trn1 z0\.d, z1\.d, z2\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_f64_untied, svfloat64_t,
+ z0 = svzipq1_f64 (z1, z2),
+ z0 = svzipq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s16.c
new file mode 100644
index 0000000..6c80e58
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq1_s16_tied1:
+** zipq1 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_s16_tied1, svint16_t,
+ z0 = svzipq1_s16 (z0, z1),
+ z0 = svzipq1 (z0, z1))
+
+/*
+** zipq1_s16_tied2:
+** zipq1 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_s16_tied2, svint16_t,
+ z0 = svzipq1_s16 (z1, z0),
+ z0 = svzipq1 (z1, z0))
+
+/*
+** zipq1_s16_untied:
+** zipq1 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_s16_untied, svint16_t,
+ z0 = svzipq1_s16 (z1, z2),
+ z0 = svzipq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s32.c
new file mode 100644
index 0000000..0c353eb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq1_s32_tied1:
+** zipq1 z0\.s, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_s32_tied1, svint32_t,
+ z0 = svzipq1_s32 (z0, z1),
+ z0 = svzipq1 (z0, z1))
+
+/*
+** zipq1_s32_tied2:
+** zipq1 z0\.s, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_s32_tied2, svint32_t,
+ z0 = svzipq1_s32 (z1, z0),
+ z0 = svzipq1 (z1, z0))
+
+/*
+** zipq1_s32_untied:
+** zipq1 z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_s32_untied, svint32_t,
+ z0 = svzipq1_s32 (z1, z2),
+ z0 = svzipq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s64.c
new file mode 100644
index 0000000..f853bcd
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq1_s64_tied1:
+** (
+** zipq1 z0\.d, z0\.d, z1\.d
+** |
+** trn1 z0\.d, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_s64_tied1, svint64_t,
+ z0 = svzipq1_s64 (z0, z1),
+ z0 = svzipq1 (z0, z1))
+
+/*
+** zipq1_s64_tied2:
+** (
+** zipq1 z0\.d, z1\.d, z0\.d
+** |
+** trn1 z0\.d, z1\.d, z0\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_s64_tied2, svint64_t,
+ z0 = svzipq1_s64 (z1, z0),
+ z0 = svzipq1 (z1, z0))
+
+/*
+** zipq1_s64_untied:
+** (
+** zipq1 z0\.d, z1\.d, z2\.d
+** |
+** trn1 z0\.d, z1\.d, z2\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_s64_untied, svint64_t,
+ z0 = svzipq1_s64 (z1, z2),
+ z0 = svzipq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s8.c
new file mode 100644
index 0000000..63ec9ab
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_s8.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq1_s8_tied1:
+** zipq1 z0\.b, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_s8_tied1, svint8_t,
+ z0 = svzipq1_s8 (z0, z1),
+ z0 = svzipq1 (z0, z1))
+
+/*
+** zipq1_s8_tied2:
+** zipq1 z0\.b, z1\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_s8_tied2, svint8_t,
+ z0 = svzipq1_s8 (z1, z0),
+ z0 = svzipq1 (z1, z0))
+
+/*
+** zipq1_s8_untied:
+** zipq1 z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_s8_untied, svint8_t,
+ z0 = svzipq1_s8 (z1, z2),
+ z0 = svzipq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u16.c
new file mode 100644
index 0000000..6bce83a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq1_u16_tied1:
+** zipq1 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_u16_tied1, svuint16_t,
+ z0 = svzipq1_u16 (z0, z1),
+ z0 = svzipq1 (z0, z1))
+
+/*
+** zipq1_u16_tied2:
+** zipq1 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_u16_tied2, svuint16_t,
+ z0 = svzipq1_u16 (z1, z0),
+ z0 = svzipq1 (z1, z0))
+
+/*
+** zipq1_u16_untied:
+** zipq1 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_u16_untied, svuint16_t,
+ z0 = svzipq1_u16 (z1, z2),
+ z0 = svzipq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u32.c
new file mode 100644
index 0000000..f8e65e5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq1_u32_tied1:
+** zipq1 z0\.s, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_u32_tied1, svuint32_t,
+ z0 = svzipq1_u32 (z0, z1),
+ z0 = svzipq1 (z0, z1))
+
+/*
+** zipq1_u32_tied2:
+** zipq1 z0\.s, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_u32_tied2, svuint32_t,
+ z0 = svzipq1_u32 (z1, z0),
+ z0 = svzipq1 (z1, z0))
+
+/*
+** zipq1_u32_untied:
+** zipq1 z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_u32_untied, svuint32_t,
+ z0 = svzipq1_u32 (z1, z2),
+ z0 = svzipq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u64.c
new file mode 100644
index 0000000..d4742f9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq1_u64_tied1:
+** (
+** zipq1 z0\.d, z0\.d, z1\.d
+** |
+** trn1 z0\.d, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_u64_tied1, svuint64_t,
+ z0 = svzipq1_u64 (z0, z1),
+ z0 = svzipq1 (z0, z1))
+
+/*
+** zipq1_u64_tied2:
+** (
+** zipq1 z0\.d, z1\.d, z0\.d
+** |
+** trn1 z0\.d, z1\.d, z0\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_u64_tied2, svuint64_t,
+ z0 = svzipq1_u64 (z1, z0),
+ z0 = svzipq1 (z1, z0))
+
+/*
+** zipq1_u64_untied:
+** (
+** zipq1 z0\.d, z1\.d, z2\.d
+** |
+** trn1 z0\.d, z1\.d, z2\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_u64_untied, svuint64_t,
+ z0 = svzipq1_u64 (z1, z2),
+ z0 = svzipq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u8.c
new file mode 100644
index 0000000..f5c0859
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq1_u8.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq1_u8_tied1:
+** zipq1 z0\.b, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_u8_tied1, svuint8_t,
+ z0 = svzipq1_u8 (z0, z1),
+ z0 = svzipq1 (z0, z1))
+
+/*
+** zipq1_u8_tied2:
+** zipq1 z0\.b, z1\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_u8_tied2, svuint8_t,
+ z0 = svzipq1_u8 (z1, z0),
+ z0 = svzipq1 (z1, z0))
+
+/*
+** zipq1_u8_untied:
+** zipq1 z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (zipq1_u8_untied, svuint8_t,
+ z0 = svzipq1_u8 (z1, z2),
+ z0 = svzipq1 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_bf16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_bf16.c
new file mode 100644
index 0000000..fbc3caa
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_bf16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq2_bf16_tied1:
+** zipq2 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_bf16_tied1, svbfloat16_t,
+ z0 = svzipq2_bf16 (z0, z1),
+ z0 = svzipq2 (z0, z1))
+
+/*
+** zipq2_bf16_tied2:
+** zipq2 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_bf16_tied2, svbfloat16_t,
+ z0 = svzipq2_bf16 (z1, z0),
+ z0 = svzipq2 (z1, z0))
+
+/*
+** zipq2_bf16_untied:
+** zipq2 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_bf16_untied, svbfloat16_t,
+ z0 = svzipq2_bf16 (z1, z2),
+ z0 = svzipq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_f16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_f16.c
new file mode 100644
index 0000000..4202fc6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_f16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq2_f16_tied1:
+** zipq2 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_f16_tied1, svfloat16_t,
+ z0 = svzipq2_f16 (z0, z1),
+ z0 = svzipq2 (z0, z1))
+
+/*
+** zipq2_f16_tied2:
+** zipq2 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_f16_tied2, svfloat16_t,
+ z0 = svzipq2_f16 (z1, z0),
+ z0 = svzipq2 (z1, z0))
+
+/*
+** zipq2_f16_untied:
+** zipq2 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_f16_untied, svfloat16_t,
+ z0 = svzipq2_f16 (z1, z2),
+ z0 = svzipq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_f32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_f32.c
new file mode 100644
index 0000000..a9eb28b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_f32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq2_f32_tied1:
+** zipq2 z0\.s, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_f32_tied1, svfloat32_t,
+ z0 = svzipq2_f32 (z0, z1),
+ z0 = svzipq2 (z0, z1))
+
+/*
+** zipq2_f32_tied2:
+** zipq2 z0\.s, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_f32_tied2, svfloat32_t,
+ z0 = svzipq2_f32 (z1, z0),
+ z0 = svzipq2 (z1, z0))
+
+/*
+** zipq2_f32_untied:
+** zipq2 z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_f32_untied, svfloat32_t,
+ z0 = svzipq2_f32 (z1, z2),
+ z0 = svzipq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_f64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_f64.c
new file mode 100644
index 0000000..a823a40
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_f64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq2_f64_tied1:
+** (
+** zipq2 z0\.d, z0\.d, z1\.d
+** |
+** trn2 z0\.d, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_f64_tied1, svfloat64_t,
+ z0 = svzipq2_f64 (z0, z1),
+ z0 = svzipq2 (z0, z1))
+
+/*
+** zipq2_f64_tied2:
+** (
+** zipq2 z0\.d, z1\.d, z0\.d
+** |
+** trn2 z0\.d, z1\.d, z0\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_f64_tied2, svfloat64_t,
+ z0 = svzipq2_f64 (z1, z0),
+ z0 = svzipq2 (z1, z0))
+
+/*
+** zipq2_f64_untied:
+** (
+** zipq2 z0\.d, z1\.d, z2\.d
+** |
+** trn2 z0\.d, z1\.d, z2\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_f64_untied, svfloat64_t,
+ z0 = svzipq2_f64 (z1, z2),
+ z0 = svzipq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s16.c
new file mode 100644
index 0000000..70e7d08
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq2_s16_tied1:
+** zipq2 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_s16_tied1, svint16_t,
+ z0 = svzipq2_s16 (z0, z1),
+ z0 = svzipq2 (z0, z1))
+
+/*
+** zipq2_s16_tied2:
+** zipq2 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_s16_tied2, svint16_t,
+ z0 = svzipq2_s16 (z1, z0),
+ z0 = svzipq2 (z1, z0))
+
+/*
+** zipq2_s16_untied:
+** zipq2 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_s16_untied, svint16_t,
+ z0 = svzipq2_s16 (z1, z2),
+ z0 = svzipq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s32.c
new file mode 100644
index 0000000..2d9cf79
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq2_s32_tied1:
+** zipq2 z0\.s, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_s32_tied1, svint32_t,
+ z0 = svzipq2_s32 (z0, z1),
+ z0 = svzipq2 (z0, z1))
+
+/*
+** zipq2_s32_tied2:
+** zipq2 z0\.s, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_s32_tied2, svint32_t,
+ z0 = svzipq2_s32 (z1, z0),
+ z0 = svzipq2 (z1, z0))
+
+/*
+** zipq2_s32_untied:
+** zipq2 z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_s32_untied, svint32_t,
+ z0 = svzipq2_s32 (z1, z2),
+ z0 = svzipq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s64.c
new file mode 100644
index 0000000..75d7c38
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq2_s64_tied1:
+** (
+** zipq2 z0\.d, z0\.d, z1\.d
+** |
+** trn2 z0\.d, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_s64_tied1, svint64_t,
+ z0 = svzipq2_s64 (z0, z1),
+ z0 = svzipq2 (z0, z1))
+
+/*
+** zipq2_s64_tied2:
+** (
+** zipq2 z0\.d, z1\.d, z0\.d
+** |
+** trn2 z0\.d, z1\.d, z0\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_s64_tied2, svint64_t,
+ z0 = svzipq2_s64 (z1, z0),
+ z0 = svzipq2 (z1, z0))
+
+/*
+** zipq2_s64_untied:
+** (
+** zipq2 z0\.d, z1\.d, z2\.d
+** |
+** trn2 z0\.d, z1\.d, z2\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_s64_untied, svint64_t,
+ z0 = svzipq2_s64 (z1, z2),
+ z0 = svzipq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s8.c
new file mode 100644
index 0000000..fdb578f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_s8.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq2_s8_tied1:
+** zipq2 z0\.b, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_s8_tied1, svint8_t,
+ z0 = svzipq2_s8 (z0, z1),
+ z0 = svzipq2 (z0, z1))
+
+/*
+** zipq2_s8_tied2:
+** zipq2 z0\.b, z1\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_s8_tied2, svint8_t,
+ z0 = svzipq2_s8 (z1, z0),
+ z0 = svzipq2 (z1, z0))
+
+/*
+** zipq2_s8_untied:
+** zipq2 z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_s8_untied, svint8_t,
+ z0 = svzipq2_s8 (z1, z2),
+ z0 = svzipq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u16.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u16.c
new file mode 100644
index 0000000..57cdc57
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u16.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq2_u16_tied1:
+** zipq2 z0\.h, z0\.h, z1\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_u16_tied1, svuint16_t,
+ z0 = svzipq2_u16 (z0, z1),
+ z0 = svzipq2 (z0, z1))
+
+/*
+** zipq2_u16_tied2:
+** zipq2 z0\.h, z1\.h, z0\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_u16_tied2, svuint16_t,
+ z0 = svzipq2_u16 (z1, z0),
+ z0 = svzipq2 (z1, z0))
+
+/*
+** zipq2_u16_untied:
+** zipq2 z0\.h, z1\.h, z2\.h
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_u16_untied, svuint16_t,
+ z0 = svzipq2_u16 (z1, z2),
+ z0 = svzipq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u32.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u32.c
new file mode 100644
index 0000000..48af067
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u32.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq2_u32_tied1:
+** zipq2 z0\.s, z0\.s, z1\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_u32_tied1, svuint32_t,
+ z0 = svzipq2_u32 (z0, z1),
+ z0 = svzipq2 (z0, z1))
+
+/*
+** zipq2_u32_tied2:
+** zipq2 z0\.s, z1\.s, z0\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_u32_tied2, svuint32_t,
+ z0 = svzipq2_u32 (z1, z0),
+ z0 = svzipq2 (z1, z0))
+
+/*
+** zipq2_u32_untied:
+** zipq2 z0\.s, z1\.s, z2\.s
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_u32_untied, svuint32_t,
+ z0 = svzipq2_u32 (z1, z2),
+ z0 = svzipq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u64.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u64.c
new file mode 100644
index 0000000..9f8f793
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u64.c
@@ -0,0 +1,47 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq2_u64_tied1:
+** (
+** zipq2 z0\.d, z0\.d, z1\.d
+** |
+** trn2 z0\.d, z0\.d, z1\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_u64_tied1, svuint64_t,
+ z0 = svzipq2_u64 (z0, z1),
+ z0 = svzipq2 (z0, z1))
+
+/*
+** zipq2_u64_tied2:
+** (
+** zipq2 z0\.d, z1\.d, z0\.d
+** |
+** trn2 z0\.d, z1\.d, z0\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_u64_tied2, svuint64_t,
+ z0 = svzipq2_u64 (z1, z0),
+ z0 = svzipq2 (z1, z0))
+
+/*
+** zipq2_u64_untied:
+** (
+** zipq2 z0\.d, z1\.d, z2\.d
+** |
+** trn2 z0\.d, z1\.d, z2\.d
+** )
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_u64_untied, svuint64_t,
+ z0 = svzipq2_u64 (z1, z2),
+ z0 = svzipq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u8.c b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u8.c
new file mode 100644
index 0000000..9a5b428
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/acle/asm/zipq2_u8.c
@@ -0,0 +1,35 @@
+/* { dg-do assemble { target aarch64_asm_sve2p1_ok } } */
+/* { dg-do compile { target { ! aarch64_asm_sve2p1_ok } } } */
+/* { dg-skip-if "" { *-*-* } { "-DSTREAMING_COMPATIBLE" } { "" } } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+
+#pragma GCC target "+sve2p1"
+
+/*
+** zipq2_u8_tied1:
+** zipq2 z0\.b, z0\.b, z1\.b
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_u8_tied1, svuint8_t,
+ z0 = svzipq2_u8 (z0, z1),
+ z0 = svzipq2 (z0, z1))
+
+/*
+** zipq2_u8_tied2:
+** zipq2 z0\.b, z1\.b, z0\.b
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_u8_tied2, svuint8_t,
+ z0 = svzipq2_u8 (z1, z0),
+ z0 = svzipq2 (z1, z0))
+
+/*
+** zipq2_u8_untied:
+** zipq2 z0\.b, z1\.b, z2\.b
+** ret
+*/
+TEST_UNIFORM_Z (zipq2_u8_untied, svuint8_t,
+ z0 = svzipq2_u8 (z1, z2),
+ z0 = svzipq2 (z1, z2))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/dupq_1.c b/gcc/testsuite/gcc.target/aarch64/sve2/dupq_1.c
new file mode 100644
index 0000000..5472e30
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/dupq_1.c
@@ -0,0 +1,165 @@
+/* { dg-options "-O2 -msve-vector-bits=256" } */
+/* { dg-final { check-function-bodies "**" "" "" { target { le } } } } */
+
+#include <arm_sve.h>
+
+#pragma GCC target "+sve2p1"
+
+typedef svuint8_t fixed_uint8_t __attribute__((arm_sve_vector_bits(256)));
+typedef svuint16_t fixed_uint16_t __attribute__((arm_sve_vector_bits(256)));
+typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(256)));
+typedef svuint64_t fixed_uint64_t __attribute__((arm_sve_vector_bits(256)));
+
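+/* DUPQ broadcasts the indexed element within each 128-bit quadword, so the
+   shuffle index patterns below repeat once per quadword.  */
+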
+/*
+** f1:
+** trn1 z0\.d, z0\.d, z0\.d
+** ret
+*/
+fixed_uint64_t
+f1 (fixed_uint64_t z0)
+{
+ return __builtin_shufflevector (z0, z0, 0, 0, 2, 2);
+}
+
+/*
+** f2:
+** trn2 z0\.d, z0\.d, z0\.d
+** ret
+*/
+fixed_uint64_t
+f2 (fixed_uint64_t z0)
+{
+ return __builtin_shufflevector (z0, z0, 1, 1, 3, 3);
+}
+
+/*
+** f3:
+** dupq z0\.s, z0\.s\[0\]
+** ret
+*/
+fixed_int32_t
+f3 (fixed_int32_t z0)
+{
+ return __builtin_shufflevector (z0, z0, 0, 0, 0, 0, 4, 4, 4, 4);
+}
+
+/*
+** f4:
+** dupq z0\.s, z0\.s\[1\]
+** ret
+*/
+fixed_int32_t
+f4 (fixed_int32_t z0)
+{
+ return __builtin_shufflevector (z0, z0, 1, 1, 1, 1, 5, 5, 5, 5);
+}
+
+/*
+** f5:
+** dupq z0\.s, z0\.s\[2\]
+** ret
+*/
+fixed_int32_t
+f5 (fixed_int32_t z0)
+{
+ return __builtin_shufflevector (z0, z0, 2, 2, 2, 2, 6, 6, 6, 6);
+}
+
+/*
+** f6:
+** dupq z0\.s, z0\.s\[3\]
+** ret
+*/
+fixed_int32_t
+f6 (fixed_int32_t z0)
+{
+ return __builtin_shufflevector (z0, z0, 3, 3, 3, 3, 7, 7, 7, 7);
+}
+
+/*
+** f7:
+** dupq z0\.h, z0\.h\[0\]
+** ret
+*/
+fixed_uint16_t
+f7 (fixed_uint16_t z0)
+{
+ return __builtin_shufflevector (z0, z0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 8, 8, 8, 8, 8, 8, 8, 8);
+}
+
+
+/*
+** f8:
+** dupq z0\.h, z0\.h\[5\]
+** ret
+*/
+fixed_uint16_t
+f8 (fixed_uint16_t z0)
+{
+ return __builtin_shufflevector (z0, z0,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 13, 13, 13, 13, 13, 13, 13, 13);
+}
+
+/*
+** f9:
+** dupq z0\.h, z0\.h\[7\]
+** ret
+*/
+fixed_uint16_t
+f9 (fixed_uint16_t z0)
+{
+ return __builtin_shufflevector (z0, z0,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 15, 15, 15, 15, 15, 15, 15, 15);
+}
+
+/*
+** f10:
+** dupq z0\.b, z0\.b\[0\]
+** ret
+*/
+fixed_uint8_t
+f10 (fixed_uint8_t z0)
+{
+ return __builtin_shufflevector (z0, z0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16);
+}
+
+/*
+** f11:
+** dupq z0\.b, z0\.b\[13\]
+** ret
+*/
+fixed_uint8_t
+f11 (fixed_uint8_t z0)
+{
+ return __builtin_shufflevector (z0, z0,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29);
+}
+
+/*
+** f12:
+** dupq z0\.b, z0\.b\[15\]
+** ret
+*/
+fixed_uint8_t
+f12 (fixed_uint8_t z0)
+{
+ return __builtin_shufflevector (z0, z0,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15,
+ 31, 31, 31, 31, 31, 31, 31, 31,
+ 31, 31, 31, 31, 31, 31, 31, 31);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/extq_1.c b/gcc/testsuite/gcc.target/aarch64/sve2/extq_1.c
new file mode 100644
index 0000000..03c5fb1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/extq_1.c
@@ -0,0 +1,131 @@
+/* { dg-options "-O2 -msve-vector-bits=256" } */
+/* { dg-final { check-function-bodies "**" "" "" { target { le } } } } */
+
+#include <arm_sve.h>
+
+#pragma GCC target "+sve2p1"
+
+typedef svint8_t fixed_int8_t __attribute__((arm_sve_vector_bits(256)));
+typedef svfloat16_t fixed_float16_t __attribute__((arm_sve_vector_bits(256)));
+typedef svuint32_t fixed_uint32_t __attribute__((arm_sve_vector_bits(256)));
+typedef svfloat64_t fixed_float64_t __attribute__((arm_sve_vector_bits(256)));
+
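+/* EXTQ extracts from each pair of corresponding 128-bit quadwords of the
+   two inputs, starting at the immediate byte offset within the quadword.  */
+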
+/*
+** f1:
+** extq z0\.b, z0\.b, z1\.b, #8
+** ret
+*/
+fixed_float64_t
+f1 (fixed_float64_t z0, fixed_float64_t z1)
+{
+ return __builtin_shufflevector (z0, z1, 1, 4, 3, 6);
+}
+
+/*
+** f2:
+** extq z0\.b, z0\.b, z1\.b, #4
+** ret
+*/
+fixed_uint32_t
+f2 (fixed_uint32_t z0, fixed_uint32_t z1)
+{
+ return __builtin_shufflevector (z0, z1, 1, 2, 3, 8, 5, 6, 7, 12);
+}
+
+/*
+** f3:
+** extq z0\.b, z0\.b, z1\.b, #12
+** ret
+*/
+fixed_uint32_t
+f3 (fixed_uint32_t z0, fixed_uint32_t z1)
+{
+ return __builtin_shufflevector (z0, z1, 3, 8, 9, 10, 7, 12, 13, 14);
+}
+
+/*
+** f4:
+** extq z0\.b, z0\.b, z1\.b, #2
+** ret
+*/
+fixed_float16_t
+f4 (fixed_float16_t z0, fixed_float16_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 1, 2, 3, 4, 5, 6, 7, 16,
+ 9, 10, 11, 12, 13, 14, 15, 24);
+}
+
+/*
+** f5:
+** extq z0\.b, z0\.b, z1\.b, #10
+** ret
+*/
+fixed_float16_t
+f5 (fixed_float16_t z0, fixed_float16_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 5, 6, 7, 16, 17, 18, 19, 20,
+ 13, 14, 15, 24, 25, 26, 27, 28);
+}
+
+/*
+** f6:
+** extq z0\.b, z0\.b, z1\.b, #14
+** ret
+*/
+fixed_float16_t
+f6 (fixed_float16_t z0, fixed_float16_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 7, 16, 17, 18, 19, 20, 21, 22,
+ 15, 24, 25, 26, 27, 28, 29, 30);
+}
+
+/*
+** f7:
+** extq z0\.b, z0\.b, z1\.b, #1
+** ret
+*/
+fixed_int8_t
+f7 (fixed_int8_t z0, fixed_int8_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 32,
+ 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 48);
+}
+
+/*
+** f8:
+** extq z0\.b, z0\.b, z1\.b, #11
+** ret
+*/
+fixed_int8_t
+f8 (fixed_int8_t z0, fixed_int8_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 11, 12, 13, 14, 15, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42,
+ 27, 28, 29, 30, 31, 48, 49, 50,
+ 51, 52, 53, 54, 55, 56, 57, 58);
+}
+
+/*
+** f9:
+** extq z0\.b, z0\.b, z1\.b, #15
+** ret
+*/
+fixed_int8_t
+f9 (fixed_int8_t z0, fixed_int8_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 15, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46,
+ 31, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/uzpq_1.c b/gcc/testsuite/gcc.target/aarch64/sve2/uzpq_1.c
new file mode 100644
index 0000000..f923e94
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/uzpq_1.c
@@ -0,0 +1,114 @@
+/* { dg-options "-O2 -msve-vector-bits=256" } */
+/* { dg-final { check-function-bodies "**" "" "" { target { le } } } } */
+
+#include <arm_sve.h>
+
+#pragma GCC target "+sve2p1"
+
+typedef svuint8_t fixed_uint8_t __attribute__((arm_sve_vector_bits(256)));
+typedef svbfloat16_t fixed_bfloat16_t __attribute__((arm_sve_vector_bits(256)));
+typedef svfloat32_t fixed_float32_t __attribute__((arm_sve_vector_bits(256)));
+typedef svint64_t fixed_int64_t __attribute__((arm_sve_vector_bits(256)));
+
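+/* UZPQ1 and UZPQ2 concatenate the even and odd elements respectively within
+   each pair of corresponding 128-bit quadwords of the two inputs.  */
+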
+/*
+** f1:
+** trn1 z0\.d, z0\.d, z1\.d
+** ret
+*/
+fixed_int64_t
+f1 (fixed_int64_t z0, fixed_int64_t z1)
+{
+ return __builtin_shufflevector (z0, z1, 0, 4, 2, 6);
+}
+
+/*
+** f2:
+** trn2 z0\.d, z0\.d, z1\.d
+** ret
+*/
+fixed_int64_t
+f2 (fixed_int64_t z0, fixed_int64_t z1)
+{
+ return __builtin_shufflevector (z0, z1, 1, 5, 3, 7);
+}
+
+/*
+** f3:
+** uzpq1 z0\.s, z0\.s, z1\.s
+** ret
+*/
+fixed_float32_t
+f3 (fixed_float32_t z0, fixed_float32_t z1)
+{
+ return __builtin_shufflevector (z0, z1, 0, 2, 8, 10, 4, 6, 12, 14);
+}
+
+/*
+** f4:
+** uzpq2 z0\.s, z0\.s, z1\.s
+** ret
+*/
+fixed_float32_t
+f4 (fixed_float32_t z0, fixed_float32_t z1)
+{
+ return __builtin_shufflevector (z0, z1, 1, 3, 9, 11, 5, 7, 13, 15);
+}
+
+/*
+** f5:
+** uzpq1 z0\.h, z0\.h, z1\.h
+** ret
+*/
+fixed_bfloat16_t
+f5 (fixed_bfloat16_t z0, fixed_bfloat16_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 0, 2, 4, 6, 16, 18, 20, 22,
+ 8, 10, 12, 14, 24, 26, 28, 30);
+}
+
+/*
+** f6:
+** uzpq2 z0\.h, z0\.h, z1\.h
+** ret
+*/
+fixed_bfloat16_t
+f6 (fixed_bfloat16_t z0, fixed_bfloat16_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 1, 3, 5, 7, 17, 19, 21, 23,
+ 9, 11, 13, 15, 25, 27, 29, 31);
+}
+
+/*
+** f7:
+** uzpq1 z0\.b, z0\.b, z1\.b
+** ret
+*/
+fixed_uint8_t
+f7 (fixed_uint8_t z0, fixed_uint8_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 0, 2, 4, 6, 8, 10, 12, 14,
+ 32, 34, 36, 38, 40, 42, 44, 46,
+ 16, 18, 20, 22, 24, 26, 28, 30,
+ 48, 50, 52, 54, 56, 58, 60, 62);
+}
+
+/*
+** f8:
+** uzpq2 z0\.b, z0\.b, z1\.b
+** ret
+*/
+fixed_uint8_t
+f8 (fixed_uint8_t z0, fixed_uint8_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 1, 3, 5, 7, 9, 11, 13, 15,
+ 33, 35, 37, 39, 41, 43, 45, 47,
+ 17, 19, 21, 23, 25, 27, 29, 31,
+ 49, 51, 53, 55, 57, 59, 61, 63);
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/sve2/zipq_1.c b/gcc/testsuite/gcc.target/aarch64/sve2/zipq_1.c
new file mode 100644
index 0000000..fa420a9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve2/zipq_1.c
@@ -0,0 +1,114 @@
+/* { dg-options "-O2 -msve-vector-bits=256" } */
+/* { dg-final { check-function-bodies "**" "" "" { target { le } } } } */
+
+#include <arm_sve.h>
+
+#pragma GCC target "+sve2p1"
+
+typedef svuint8_t fixed_uint8_t __attribute__((arm_sve_vector_bits(256)));
+typedef svbfloat16_t fixed_bfloat16_t __attribute__((arm_sve_vector_bits(256)));
+typedef svfloat32_t fixed_float32_t __attribute__((arm_sve_vector_bits(256)));
+typedef svint64_t fixed_int64_t __attribute__((arm_sve_vector_bits(256)));
+
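+/* ZIPQ1 and ZIPQ2 interleave the elements from the low and high halves
+   respectively of each pair of corresponding 128-bit quadwords.  */
+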
+/*
+** f1:
+** trn1 z0\.d, z0\.d, z1\.d
+** ret
+*/
+fixed_int64_t
+f1 (fixed_int64_t z0, fixed_int64_t z1)
+{
+ return __builtin_shufflevector (z0, z1, 0, 4, 2, 6);
+}
+
+/*
+** f2:
+** trn2 z0\.d, z0\.d, z1\.d
+** ret
+*/
+fixed_int64_t
+f2 (fixed_int64_t z0, fixed_int64_t z1)
+{
+ return __builtin_shufflevector (z0, z1, 1, 5, 3, 7);
+}
+
+/*
+** f3:
+** zipq1 z0\.s, z0\.s, z1\.s
+** ret
+*/
+fixed_float32_t
+f3 (fixed_float32_t z0, fixed_float32_t z1)
+{
+ return __builtin_shufflevector (z0, z1, 0, 8, 1, 9, 4, 12, 5, 13);
+}
+
+/*
+** f4:
+** zipq2 z0\.s, z0\.s, z1\.s
+** ret
+*/
+fixed_float32_t
+f4 (fixed_float32_t z0, fixed_float32_t z1)
+{
+ return __builtin_shufflevector (z0, z1, 2, 10, 3, 11, 6, 14, 7, 15);
+}
+
+/*
+** f5:
+** zipq1 z0\.h, z0\.h, z1\.h
+** ret
+*/
+fixed_bfloat16_t
+f5 (fixed_bfloat16_t z0, fixed_bfloat16_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 0, 16, 1, 17, 2, 18, 3, 19,
+ 8, 24, 9, 25, 10, 26, 11, 27);
+}
+
+/*
+** f6:
+** zipq2 z0\.h, z0\.h, z1\.h
+** ret
+*/
+fixed_bfloat16_t
+f6 (fixed_bfloat16_t z0, fixed_bfloat16_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 4, 20, 5, 21, 6, 22, 7, 23,
+ 12, 28, 13, 29, 14, 30, 15, 31);
+}
+
+/*
+** f7:
+** zipq1 z0\.b, z0\.b, z1\.b
+** ret
+*/
+fixed_uint8_t
+f7 (fixed_uint8_t z0, fixed_uint8_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 0, 32, 1, 33, 2, 34, 3, 35,
+ 4, 36, 5, 37, 6, 38, 7, 39,
+ 16, 48, 17, 49, 18, 50, 19, 51,
+ 20, 52, 21, 53, 22, 54, 23, 55);
+}
+
+/*
+** f8:
+** zipq2 z0\.b, z0\.b, z1\.b
+** ret
+*/
+fixed_uint8_t
+f8 (fixed_uint8_t z0, fixed_uint8_t z1)
+{
+ return __builtin_shufflevector (z0, z1,
+ 8, 40, 9, 41, 10, 42, 11, 43,
+ 12, 44, 13, 45, 14, 46, 15, 47,
+ 24, 56, 25, 57, 26, 58, 27, 59,
+ 28, 60, 29, 61, 30, 62, 31, 63);
+}